hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2e1ccd54ccad1613ed746053417cd748b2d33875
| 192
|
py
|
Python
|
rvpvp/isa/rvv/vmv_x_s.py
|
ultrafive/riscv-pvp
|
843e38422c3d545352b955764927d5e7847e5453
|
[
"Unlicense"
] | 5
|
2021-05-10T09:57:00.000Z
|
2021-10-05T14:39:20.000Z
|
rvpvp/isa/rvv/vmv_x_s.py
|
ultrafive/riscv-pvp
|
843e38422c3d545352b955764927d5e7847e5453
|
[
"Unlicense"
] | null | null | null |
rvpvp/isa/rvv/vmv_x_s.py
|
ultrafive/riscv-pvp
|
843e38422c3d545352b955764927d5e7847e5453
|
[
"Unlicense"
] | 1
|
2021-05-14T20:24:11.000Z
|
2021-05-14T20:24:11.000Z
|
from ...isa.inst import *
import numpy as np
class Vmv_x_s(Inst):
name = 'vmv.x.s'
# vmv.x.s rd, vs2
# x[rd] = vs2[0] (vs1=0)
def golden(self):
return self['vs2'][0]
| 19.2
| 29
| 0.546875
| 35
| 192
| 2.942857
| 0.571429
| 0.116505
| 0.145631
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05
| 0.270833
| 192
| 9
| 30
| 21.333333
| 0.685714
| 0.203125
| 0
| 0
| 0
| 0
| 0.066667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0.166667
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 4
|
2e4e6cf733e54eca65a602d7e3bfa467d83a46b3
| 98
|
py
|
Python
|
transfer_app/apps.py
|
qbrc-cnap/cnap
|
624683e91a64c3b4934b578c59db850242d2f94c
|
[
"MIT"
] | 1
|
2021-07-08T14:06:04.000Z
|
2021-07-08T14:06:04.000Z
|
transfer_app/apps.py
|
qbrc-cnap/cnap
|
624683e91a64c3b4934b578c59db850242d2f94c
|
[
"MIT"
] | 12
|
2020-02-12T00:10:53.000Z
|
2021-06-10T21:24:45.000Z
|
transfer_app/apps.py
|
qbrc-cnap/cnap
|
624683e91a64c3b4934b578c59db850242d2f94c
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class TransferAppConfig(AppConfig):
name = 'transfer_app'
| 16.333333
| 35
| 0.77551
| 11
| 98
| 6.818182
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153061
| 98
| 5
| 36
| 19.6
| 0.903614
| 0
| 0
| 0
| 0
| 0
| 0.122449
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
2e53f2b6759936a175fbb3e4afe7b5be958c9128
| 79
|
py
|
Python
|
rest_framework_captcha/__init__.py
|
leonardoarroyo/restframework-catpcha
|
c1acf5904ba8c48bb58e104195380b0bbce1ed8e
|
[
"MIT"
] | null | null | null |
rest_framework_captcha/__init__.py
|
leonardoarroyo/restframework-catpcha
|
c1acf5904ba8c48bb58e104195380b0bbce1ed8e
|
[
"MIT"
] | null | null | null |
rest_framework_captcha/__init__.py
|
leonardoarroyo/restframework-catpcha
|
c1acf5904ba8c48bb58e104195380b0bbce1ed8e
|
[
"MIT"
] | 1
|
2020-12-11T21:38:09.000Z
|
2020-12-11T21:38:09.000Z
|
default_app_config = 'rest_framework_captcha.apps.RestFrameworkCaptchaConfig'
| 26.333333
| 77
| 0.886076
| 8
| 79
| 8.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050633
| 79
| 2
| 78
| 39.5
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0.692308
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2e6262da82cf152dfcab69d6e93ee5086457a9ff
| 153
|
py
|
Python
|
bin/trigs/tritrigs-triangle-4.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/trigs/tritrigs-triangle-4.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/trigs/tritrigs-triangle-4.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | 1
|
2022-01-02T16:54:14.000Z
|
2022-01-02T16:54:14.000Z
|
#!/usr/bin/env python
# $Id$
"""
1 solution.
"""
import puzzler
from puzzler.puzzles.tritrigs import TritrigsTriangle4
puzzler.run(TritrigsTriangle4)
| 12.75
| 54
| 0.751634
| 18
| 153
| 6.388889
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022222
| 0.117647
| 153
| 11
| 55
| 13.909091
| 0.82963
| 0.24183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
2e7ad10848efe85c68fbc9621bee7802f36c7718
| 100
|
py
|
Python
|
saleor/request_order/apps.py
|
AkioSky/FishMart
|
1d01d7e79812dc7cccb1b26ffc6457af6104d9f2
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/request_order/apps.py
|
AkioSky/FishMart
|
1d01d7e79812dc7cccb1b26ffc6457af6104d9f2
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/request_order/apps.py
|
AkioSky/FishMart
|
1d01d7e79812dc7cccb1b26ffc6457af6104d9f2
|
[
"BSD-3-Clause"
] | null | null | null |
from django.apps import AppConfig
class RequestOrderConfig(AppConfig):
name = 'request_order'
| 16.666667
| 36
| 0.78
| 11
| 100
| 7
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 100
| 5
| 37
| 20
| 0.905882
| 0
| 0
| 0
| 0
| 0
| 0.13
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
2e7ddad188be683aa53d4150da78432b51cf923a
| 120
|
py
|
Python
|
week-6/flask-api-small/scripts/prediction.py
|
cgsanders/stat418-tools-in-datascience
|
4a86fd15386f47d0a59422226b13834bae5a1387
|
[
"MIT"
] | 4
|
2019-04-21T08:49:52.000Z
|
2019-09-22T14:07:26.000Z
|
week-6/flask-api-small/scripts/prediction.py
|
cgsanders/stat418-tools-in-datascience
|
4a86fd15386f47d0a59422226b13834bae5a1387
|
[
"MIT"
] | 58
|
2019-04-22T15:44:14.000Z
|
2019-06-04T19:50:54.000Z
|
week-6/flask-api-small/scripts/prediction.py
|
cgsanders/stat418-tools-in-datascience
|
4a86fd15386f47d0a59422226b13834bae5a1387
|
[
"MIT"
] | 33
|
2019-04-03T01:48:34.000Z
|
2021-04-30T06:36:29.000Z
|
#!/usr/bin/env python3
def predict(dict_values):
x1 = float(dict_values["x"])
y_pred = 2**x1
return y_pred
| 17.142857
| 32
| 0.641667
| 20
| 120
| 3.65
| 0.75
| 0.273973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042105
| 0.208333
| 120
| 6
| 33
| 20
| 0.726316
| 0.175
| 0
| 0
| 0
| 0
| 0.010204
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2e8f0b81bbd9561984ac38ea8dfc729ccec17dd2
| 236
|
py
|
Python
|
organizerui/model.py
|
angusmacdonald/gopro-organizer
|
6fc77fab231d016186b4bb4df5e24ecf039b72d9
|
[
"MIT"
] | 6
|
2016-04-23T23:44:10.000Z
|
2020-10-30T07:35:19.000Z
|
organizerui/model.py
|
angusmacdonald/gopro-organizer
|
6fc77fab231d016186b4bb4df5e24ecf039b72d9
|
[
"MIT"
] | null | null | null |
organizerui/model.py
|
angusmacdonald/gopro-organizer
|
6fc77fab231d016186b4bb4df5e24ecf039b72d9
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('')
from organizercore import organizer
class OrganizerModel:
def start_processing(self, input_dir, output_dir, settings):
organizer.Organizer(settings).process_gopro_dir(input_dir, output_dir)
| 23.6
| 78
| 0.783898
| 30
| 236
| 5.933333
| 0.633333
| 0.089888
| 0.157303
| 0.191011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131356
| 236
| 9
| 79
| 26.222222
| 0.868293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
5cea000fbf7f6010b16ca5c88addb7c29ae330a8
| 25
|
py
|
Python
|
src/__init__.py
|
dmitrijbozhkov/cloudcourseproject
|
3e62a5fafef418c1c058587abc5615b03fc2325a
|
[
"Apache-2.0"
] | null | null | null |
src/__init__.py
|
dmitrijbozhkov/cloudcourseproject
|
3e62a5fafef418c1c058587abc5615b03fc2325a
|
[
"Apache-2.0"
] | 7
|
2021-02-08T20:41:23.000Z
|
2022-03-12T00:21:37.000Z
|
src/__init__.py
|
dmitrijbozhkov/cloudcourseproject
|
3e62a5fafef418c1c058587abc5615b03fc2325a
|
[
"Apache-2.0"
] | null | null | null |
""" Application files """
| 25
| 25
| 0.64
| 2
| 25
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 25
| 1
| 25
| 25
| 0.727273
| 0.68
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
5cee76e061dea1eed3dca0e1ad8fc1077c7c5235
| 214
|
py
|
Python
|
rameniaapp/forms/create_list.py
|
awlane/ramenia
|
6bf8e75a1f279ac584daa4ee19927ffccaa67551
|
[
"MIT"
] | null | null | null |
rameniaapp/forms/create_list.py
|
awlane/ramenia
|
6bf8e75a1f279ac584daa4ee19927ffccaa67551
|
[
"MIT"
] | null | null | null |
rameniaapp/forms/create_list.py
|
awlane/ramenia
|
6bf8e75a1f279ac584daa4ee19927ffccaa67551
|
[
"MIT"
] | null | null | null |
from django import forms
from django.forms.widgets import TextInput
class ListCreateForm(forms.Form):
list_name = forms.CharField(label="Name", max_length=60, widget=TextInput(attrs={'class':'form-control'}))
| 35.666667
| 110
| 0.775701
| 29
| 214
| 5.655172
| 0.655172
| 0.121951
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010309
| 0.093458
| 214
| 5
| 111
| 42.8
| 0.835052
| 0
| 0
| 0
| 0
| 0
| 0.098131
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
cf431e72726e4b11c54c98c1b966e61f78dddfff
| 7,755
|
py
|
Python
|
source/code/tag_utilities.py
|
awslabs/tag-tamer
|
bfd164c36b5e3ba8e01aba54d973ce372e982b09
|
[
"MIT",
"MIT-0"
] | 15
|
2021-06-27T23:42:37.000Z
|
2021-09-24T19:40:00.000Z
|
source/code/tag_utilities.py
|
awslabs/tag-tamer
|
bfd164c36b5e3ba8e01aba54d973ce372e982b09
|
[
"MIT",
"MIT-0"
] | 7
|
2021-07-05T06:56:46.000Z
|
2021-08-06T00:59:36.000Z
|
source/code/tag_utilities.py
|
awslabs/tag-tamer
|
bfd164c36b5e3ba8e01aba54d973ce372e982b09
|
[
"MIT",
"MIT-0"
] | 5
|
2021-06-23T17:59:01.000Z
|
2021-10-20T14:22:44.000Z
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
Tag Tamer utility functions to evaluate resource tags
"""
import logging
# Instantiate logging for this module using its file name
log = logging.getLogger(__name__)
def tag_filter_matcher(
conjunction=None,
tag_key1_state=None,
tag_value1_state=None,
tag_key2_state=None,
tag_value2_state=None,
resource_inventory=None,
filter_tags=None,
tag_dict=None,
resource_name=None,
resource_arn=None,
):
"""Updates the passed resource_inventory dictionary with ARN & name of all resources matching the
user-selected filter tag keys & values. User-selected filter tag keys & tag key:value combinations
are AND'ed or OR'ed based on value of conjunction.
"""
def _intersection_union_invalid(tag_dict, resource_name, resource_arn):
resource_inventory.clear()
def _intersection_union_fftt(tag_dict, resource_name, resource_arn):
if tag_dict.get(filter_tags.get("tag_key2")) == filter_tags.get("tag_value2"):
resource_inventory[resource_arn] = resource_name
def _intersection_union_ttff(tag_dict, resource_name, resource_arn):
if tag_dict.get(filter_tags.get("tag_key1")) == filter_tags.get("tag_value1"):
resource_inventory[resource_arn] = resource_name
def _intersection_tfff(tag_dict, resource_name, resource_arn):
if filter_tags.get("tag_key1") in tag_dict:
resource_inventory[resource_arn] = resource_name
def _intersection_fftf(tag_dict, resource_name, resource_arn):
if filter_tags.get("tag_key2") in tag_dict:
resource_inventory[resource_arn] = resource_name
def _intersection_tftf(tag_dict, resource_name, resource_arn):
if (
filter_tags.get("tag_key1") in tag_dict
and filter_tags.get("tag_key2") in tag_dict
):
resource_inventory[resource_arn] = resource_name
def _intersection_tftt(tag_dict, resource_name, resource_arn):
if (
filter_tags.get("tag_key1") in tag_dict
and filter_tags.get("tag_key2") in tag_dict
):
if tag_dict.get(filter_tags.get("tag_key2")) == filter_tags.get(
"tag_value2"
):
resource_inventory[resource_arn] = resource_name
def _intersection_tttf(tag_dict, resource_name, resource_arn):
if (
filter_tags.get("tag_key1") in tag_dict
and filter_tags.get("tag_key2") in tag_dict
):
if tag_dict.get(filter_tags.get("tag_key1")) == filter_tags.get(
"tag_value1"
):
resource_inventory[resource_arn] = resource_name
def _intersection_tttt(tag_dict, resource_name, resource_arn):
if tag_dict.get(filter_tags.get("tag_key1")) == filter_tags.get(
"tag_value1"
) and tag_dict.get(filter_tags.get("tag_key2")) == filter_tags.get(
"tag_value2"
):
resource_inventory[resource_arn] = resource_name
def _intersection_ffff(tag_dict, resource_name, resource_arn):
resource_inventory[resource_arn] = resource_name
def _union_tfff_tftf_fftf(tag_dict, resource_name, resource_arn):
if (
filter_tags.get("tag_key1") in tag_dict
or filter_tags.get("tag_key2") in tag_dict
):
resource_inventory[resource_arn] = resource_name
def _union_tttf(tag_dict, resource_name, resource_arn):
if filter_tags.get("tag_key1") in tag_dict:
if tag_dict[filter_tags.get("tag_key1")] == filter_tags.get("tag_value1"):
resource_inventory[resource_arn] = resource_name
elif filter_tags.get("tag_key2") in tag_dict:
resource_inventory[resource_arn] = resource_name
def _union_tftt(tag_dict, resource_name, resource_arn):
if filter_tags.get("tag_key2") in tag_dict:
if tag_dict[filter_tags.get("tag_key2")] == filter_tags.get("tag_value2"):
resource_inventory[resource_arn] = resource_name
elif filter_tags.get("tag_key1") in tag_dict:
resource_inventory[resource_arn] = resource_name
def _union_tttt(tag_dict, resource_name, resource_arn):
if tag_dict.get(filter_tags.get("tag_key1")) == filter_tags.get(
"tag_value1"
) or tag_dict.get(filter_tags.get("tag_key2")) == filter_tags.get("tag_value2"):
resource_inventory[resource_arn] = resource_name
def _union_ffff(tag_dict, resource_name, resource_arn):
resource_inventory[resource_arn] = resource_name
# "AND" Truth table check for tag_key1, tag_value1, tag_key2, tag_value2
intersection_combos = {
(False, False, False, True): _intersection_union_invalid,
(False, True, False, False): _intersection_union_invalid,
(False, True, False, True): _intersection_union_invalid,
(True, False, False, True): _intersection_union_invalid,
(True, True, False, True): _intersection_union_invalid,
(False, True, True, False): _intersection_union_invalid,
(False, False, True, False): _intersection_fftf,
(False, False, True, True): _intersection_union_fftt,
(True, False, False, False): _intersection_tfff,
(True, True, False, False): _intersection_union_ttff,
(True, False, True, False): _intersection_tftf,
(True, False, True, True): _intersection_tftt,
(True, True, True, False): _intersection_tttf,
(True, True, True, True): _intersection_tttt,
(False, False, False, False): _intersection_ffff,
}
# "OR" Truth table check for tag_key1, tag_value1, tag_key2, tag_value2
union_combos = {
(False, False, False, True): _intersection_union_invalid,
(False, True, False, False): _intersection_union_invalid,
(False, True, False, True): _intersection_union_invalid,
(False, True, True, True): _intersection_union_invalid,
(True, True, False, True): _intersection_union_invalid,
(False, False, True, False): _union_tfff_tftf_fftf,
(False, False, True, True): _intersection_union_fftt,
(True, False, False, False): _union_tfff_tftf_fftf,
(True, False, True, False): _union_tfff_tftf_fftf,
(True, False, True, True): _union_tftt,
(True, True, False, False): _intersection_union_ttff,
(True, True, True, False): _union_tttf,
(True, True, True, True): _union_tttt,
(False, False, False, False): _union_ffff,
}
if conjunction == "AND":
intersection_combos[
(
tag_key1_state,
tag_value1_state,
tag_key2_state,
tag_value2_state,
)
](
tag_dict,
resource_name,
resource_arn,
)
elif conjunction == "OR":
union_combos[
(
tag_key1_state,
tag_value1_state,
tag_key2_state,
tag_value2_state,
)
](
tag_dict,
resource_name,
resource_arn,
)
else:
_intersection_union_invalid(tag_dict, resource_name, resource_arn)
def get_tag_filter_key_value_states(filter_tags=None):
tag_key1_state = True if filter_tags.get("tag_key1") else False
tag_value1_state = True if filter_tags.get("tag_value1") else False
tag_key2_state = True if filter_tags.get("tag_key2") else False
tag_value2_state = True if filter_tags.get("tag_value2") else False
return tag_key1_state, tag_value1_state, tag_key2_state, tag_value2_state
| 40.602094
| 102
| 0.660477
| 970
| 7,755
| 4.878351
| 0.095876
| 0.063609
| 0.104396
| 0.128487
| 0.792265
| 0.75951
| 0.754861
| 0.717244
| 0.678994
| 0.651733
| 0
| 0.011465
| 0.246422
| 7,755
| 190
| 103
| 40.815789
| 0.798255
| 0.076725
| 0
| 0.490196
| 0
| 0
| 0.04677
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.006536
| 0
| 0.124183
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
cf51ab45924c97d384d57fc81c4e9f5c32da4311
| 23
|
py
|
Python
|
samtranslator/__init__.py
|
bhumikapaharia/serverless-application-model
|
4161fdd59f1ec449877a64796401ca074ae7be02
|
[
"Apache-2.0"
] | 4
|
2021-12-18T06:44:57.000Z
|
2021-12-28T09:52:53.000Z
|
samtranslator/__init__.py
|
bhumikapaharia/serverless-application-model
|
4161fdd59f1ec449877a64796401ca074ae7be02
|
[
"Apache-2.0"
] | 1
|
2021-04-13T17:54:21.000Z
|
2021-04-13T17:54:21.000Z
|
samtranslator/__init__.py
|
chrisoverzero/serverless-application-model
|
f297cfb7bb68c75b3a75da49c9488e62bad16347
|
[
"Apache-2.0"
] | null | null | null |
__version__ = "1.35.0"
| 11.5
| 22
| 0.652174
| 4
| 23
| 2.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0.130435
| 23
| 1
| 23
| 23
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
cf58f44a787c70c43d9a1a1e3d53a92ccd902710
| 7,019
|
py
|
Python
|
fusion_platform/translations.py
|
d-cat-support/fusion-platform-python-sdk
|
6f98a60f33a962f6a10861da15affbc28bf4a17a
|
[
"MIT"
] | null | null | null |
fusion_platform/translations.py
|
d-cat-support/fusion-platform-python-sdk
|
6f98a60f33a962f6a10861da15affbc28bf4a17a
|
[
"MIT"
] | null | null | null |
fusion_platform/translations.py
|
d-cat-support/fusion-platform-python-sdk
|
6f98a60f33a962f6a10861da15affbc28bf4a17a
|
[
"MIT"
] | null | null | null |
"""
Compiled translations.
author: Matthew Casey
© [Digital Content Analysis Technology Ltd](https://www.d-cat.co.uk)
"""
# Do not modify this file manually as it is built automatically by the localisations.py script.
import i18n
# @formatter:off
i18n.add_translation('session.request_failed', 'API request failed: %{message}', 'en')
i18n.add_translation('session.login_failed', 'Login failed', 'en')
i18n.add_translation('session.missing_password', 'Password must be specified', 'en')
i18n.add_translation('session.missing_email_user_id', 'Either an email address or a user id must be specified', 'en')
i18n.add_translation('fusion_platform.support', 'Support: support@d-cat.co.uk', 'en')
i18n.add_translation('fusion_platform.version_date', 'Date: %{version_date}', 'en')
i18n.add_translation('fusion_platform.version', 'Version: %{version}', 'en')
i18n.add_translation('fusion_platform.sdk', 'Fusion Platform(r) SDK', 'en')
i18n.add_translation('models.data_file.failed_download_url', 'Failed to get URL from download file response', 'en')
i18n.add_translation('models.data_file.no_download', 'No download is in progress', 'en')
i18n.add_translation('models.data_file.download_already_in_progress', 'Cannot download file as the download is already in progress', 'en')
i18n.add_translation('models.data_file.organisation_id.description', 'The owning organisation.', 'en')
i18n.add_translation('models.data_file.organisation_id.title', 'Organisation', 'en')
i18n.add_translation('models.data.no_create', 'No create is in progress', 'en')
i18n.add_translation('models.data.failed_add_missing_file', 'Failed to add file as the file does not exist: %{file}', 'en')
i18n.add_translation('models.data.failed_add_file_not_unique', 'Failed to add file as the id is not unique', 'en')
i18n.add_translation('models.data.failed_add_file_url', 'Failed to get URL from add file response', 'en')
i18n.add_translation('models.data.failed_add_file_id', 'Failed to get id from add file response', 'en')
i18n.add_translation('models.process_execution.execution_failed', 'Execution has failed', 'en')
i18n.add_translation('models.fields.uuid.invalid_uuid', 'Not a valid utf-8 string', 'en')
i18n.add_translation('models.fields.url.invalid_url', 'Not a valid URL', 'en')
i18n.add_translation('models.fields.tuple.invalid', 'Not a valid tuple', 'en')
i18n.add_translation('models.fields.timedelta.invalid', 'Not a valid period of time', 'en')
i18n.add_translation('models.fields.string.invalid_utf8', 'Not a valid utf-8 string', 'en')
i18n.add_translation('models.fields.string.invalid', 'Not a valid string', 'en')
i18n.add_translation('models.fields.relativedelta.invalid', 'Not a valid relative period of time', 'en')
i18n.add_translation('models.fields.nested.type', 'Invalid type', 'en')
i18n.add_translation('models.fields.list.invalid', 'Not a valid list', 'en')
i18n.add_translation('models.fields.ip.invalid_ip', 'Not a valid IP address', 'en')
i18n.add_translation('models.fields.integer.too_large', 'Integer too large', 'en')
i18n.add_translation('models.fields.integer.invalid', 'Not a valid integer', 'en')
i18n.add_translation('models.fields.float.special', 'Special numeric values (nan or infinity) are not permitted.', 'en')
i18n.add_translation('models.fields.float.too_large', 'Float too large', 'en')
i18n.add_translation('models.fields.float.invalid', 'Not a valid float', 'en')
i18n.add_translation('models.fields.email.invalid', 'Not a valid email address', 'en')
i18n.add_translation('models.fields.dict.invalid', 'Not a valid dictionary', 'en')
i18n.add_translation('models.fields.decimal.special', 'Special numeric values (nan or infinity) are not permitted', 'en')
i18n.add_translation('models.fields.decimal.too_large', 'Decimal too large', 'en')
i18n.add_translation('models.fields.decimal.invalid', 'Not a valid decimal', 'en')
i18n.add_translation('models.fields.datetime.format', '\'{input}\' cannot be formatted as a {obj_type}', 'en')
i18n.add_translation('models.fields.datetime.invalid_awareness', 'Not a valid {awareness} {obj_type}', 'en')
i18n.add_translation('models.fields.datetime.invalid', 'Not a valid {obj_type}', 'en')
i18n.add_translation('models.fields.boolean.invalid', 'Not a valid boolean', 'en')
i18n.add_translation('models.model.update_empty_body', 'Update cannot be requested as there are no attributes to be used (read-only attributes have been removed)', 'en')
i18n.add_translation('models.model.create_empty_body', 'Create cannot be requested as there are no attributes to be used (read-only attributes have been removed)', 'en')
i18n.add_translation('models.model.failed_model_validation', 'Failed to validate model: %{message}', 'en')
i18n.add_translation('models.model.failed_model_new', 'Failed to get model template from response', 'en')
i18n.add_translation('models.model.failed_model_send_and_load', 'Failed to request and load model', 'en')
i18n.add_translation('models.model.no_such_keys', 'No such keys %{keys}', 'en')
i18n.add_translation('models.model.readonly_property', 'Property %{property} is read-only and cannot be set', 'en')
i18n.add_translation('models.model.not_persisted', 'Model is not persisted in the Fusion Platform(r)', 'en')
i18n.add_translation('models.model.already_persisted', 'Model is already persisted in the Fusion Platform(r)', 'en')
i18n.add_translation('models.process.execution_should_have_started', 'Process execution should have started by now', 'en')
i18n.add_translation('models.process.not_executable', 'Process is not executable', 'en')
i18n.add_translation('models.process.wrong_file_type', 'File type of supplied data object (%{actual}) does not match the file type for the input (%{expected})', 'en')
i18n.add_translation('models.process.data_not_ready', 'Data object is not ready to be used in a process', 'en')
i18n.add_translation('models.process.option_wrong_type', 'Option value should be of type %{type}', 'en')
i18n.add_translation('models.process.cannot_find_option', 'No such option', 'en')
i18n.add_translation('models.process.cannot_find_input', 'No such input', 'en')
i18n.add_translation('models.process.option_not_specified', 'Option name or object must be provided to set option', 'en')
i18n.add_translation('models.process.data_not_specified', 'Data object must be provided to set input', 'en')
i18n.add_translation('models.process.input_not_specified', 'Input number or object must be provided to set input', 'en')
i18n.add_translation('models.process.no_change_executing', 'Process cannot be modified as it is currently executing', 'en')
i18n.add_translation('models.process.option.constrained_values.description', 'The constrained values for the option.', 'en')
i18n.add_translation('models.process.option.constrained_values.title', 'Constrained Values', 'en')
i18n.add_translation('models.process.option.constrained_names.description', 'The constrained value names for the option.', 'en')
i18n.add_translation('models.process.option.constrained_names.title', 'Constrained Names', 'en')
# @formatter:on
| 85.597561
| 169
| 0.774327
| 1,043
| 7,019
| 5.055609
| 0.173538
| 0.088944
| 0.228712
| 0.250332
| 0.646691
| 0.627157
| 0.524559
| 0.390669
| 0.296416
| 0.183197
| 0
| 0.021574
| 0.082063
| 7,019
| 81
| 170
| 86.654321
| 0.796834
| 0.034763
| 0
| 0
| 0
| 0.044118
| 0.668884
| 0.306726
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.014706
| 0.014706
| 0
| 0.014706
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
cf63128ae3837cdf01a72550d0f6236a6665d83c
| 35
|
py
|
Python
|
scripts/tcutils/tests/cores_ut.py
|
rombie/contrail-test
|
a68c71d6f282142501a7e2e889bbb232fdd82dc3
|
[
"Apache-2.0"
] | 5
|
2020-09-29T00:36:57.000Z
|
2022-02-16T06:51:32.000Z
|
tcutils/tests/cores_ut.py
|
vkolli/contrail-test-perf
|
db04b8924a2c330baabe3059788b149d957a7d67
|
[
"Apache-2.0"
] | 27
|
2019-11-02T02:18:34.000Z
|
2022-02-24T18:49:08.000Z
|
tcutils/tests/cores_ut.py
|
vkolli/contrail-test-perf
|
db04b8924a2c330baabe3059788b149d957a7d67
|
[
"Apache-2.0"
] | 20
|
2019-11-28T16:02:25.000Z
|
2022-01-06T05:56:58.000Z
|
"""Unittests for cores module.
"""
| 11.666667
| 30
| 0.657143
| 4
| 35
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 35
| 2
| 31
| 17.5
| 0.766667
| 0.771429
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
cf815298accec6c14c7afef44e976c7b6069c135
| 73
|
py
|
Python
|
scalabel/tools/__init__.py
|
cwlroda/scalabel
|
296b7f3050ec0d02b4578d9d1f174ffd22aee3fb
|
[
"Apache-2.0"
] | 279
|
2019-11-18T01:48:39.000Z
|
2022-03-30T00:16:43.000Z
|
scalabel/tools/__init__.py
|
cwlroda/scalabel
|
296b7f3050ec0d02b4578d9d1f174ffd22aee3fb
|
[
"Apache-2.0"
] | 141
|
2019-11-20T02:36:11.000Z
|
2022-03-29T15:17:46.000Z
|
scalabel/tools/__init__.py
|
cwlroda/scalabel
|
296b7f3050ec0d02b4578d9d1f174ffd22aee3fb
|
[
"Apache-2.0"
] | 85
|
2019-11-18T06:10:12.000Z
|
2022-03-27T12:32:55.000Z
|
"""Tools for using scalabel."""
from . import edit_labels, prepare_data
| 18.25
| 39
| 0.739726
| 10
| 73
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136986
| 73
| 3
| 40
| 24.333333
| 0.825397
| 0.342466
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
d85f295a43e1700c84c02249db04af993786f746
| 26
|
py
|
Python
|
homeassistant/components/imap/__init__.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
homeassistant/components/imap/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
homeassistant/components/imap/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""The imap component."""
| 13
| 25
| 0.615385
| 3
| 26
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.695652
| 0.730769
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d860dd72da3f6f3d31741e148127e543434eba7d
| 4,552
|
py
|
Python
|
tests/pv_generation.py
|
Aloso/pv-simulator
|
daae4a00c4d7c759eedeffe39b6d60395e9a3ebf
|
[
"MIT"
] | null | null | null |
tests/pv_generation.py
|
Aloso/pv-simulator
|
daae4a00c4d7c759eedeffe39b6d60395e9a3ebf
|
[
"MIT"
] | null | null | null |
tests/pv_generation.py
|
Aloso/pv-simulator
|
daae4a00c4d7c759eedeffe39b6d60395e9a3ebf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys, os, random, unittest, itertools
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# pylint: disable=import-error
from libpv.pv_generation import PvGenerator, weather
from libpv.time_of_day import TimeOfDay
def generate_360_times():
    """Yield `TimeOfDay` samples spaced 240 s apart, starting at midnight.

    Iteration ends once the time wraps past the end of the day, i.e. after
    360 samples for an 86400-second day.
    """
    current = TimeOfDay(0)
    wrapped = False
    while not wrapped:
        yield current
        current += 240
        # A post-increment value below 240 s means we wrapped around midnight.
        wrapped = current.seconds() < 240
class TestPvGeneration(unittest.TestCase):
    """Tests for PvGenerator (libpv.pv_generation).

    Both tests sample the generator every 240 s over a full day via
    generate_360_times() (360 samples).
    """

    def testEquality(self):
        # Generator producing up to 3500 W between 08:00 and 20:00.
        gen = PvGenerator(TimeOfDay.from_hms(8), TimeOfDay.from_hms(20), 3500)
        # Golden-value test: the rounded power curve must match this exact
        # sequence (zeros outside daylight, bell-shaped curve peaking at 3500).
        self.assertEqual(
            [round(gen.get_value(x)) for x in generate_360_times()],
            [
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11,
                22, 32, 43, 54, 65, 76, 86, 97, 108, 119, 130, 140, 151,
                162, 173, 184, 194, 205, 216, 229, 304, 378, 451, 523,
                595, 665, 735, 803, 871, 938, 1004, 1069, 1134, 1197,
                1260, 1322, 1383, 1443, 1502, 1560, 1618, 1674, 1730,
                1785, 1839, 1892, 1944, 1996, 2046, 2096, 2145, 2193,
                2240, 2286, 2332, 2376, 2420, 2463, 2504, 2545, 2586,
                2625, 2663, 2701, 2738, 2774, 2809, 2843, 2876, 2908,
                2940, 2971, 3000, 3029, 3058, 3085, 3111, 3137, 3161,
                3185, 3208, 3230, 3251, 3271, 3291, 3309, 3327, 3344,
                3360, 3375, 3389, 3403, 3415, 3427, 3438, 3448, 3457,
                3465, 3472, 3479, 3484, 3489, 3493, 3496, 3498, 3500,
                3500, 3500, 3498, 3496, 3493, 3489, 3484, 3479, 3472,
                3465, 3457, 3448, 3438, 3427, 3415, 3403, 3389, 3375,
                3360, 3344, 3327, 3309, 3291, 3271, 3251, 3230, 3208,
                3185, 3161, 3137, 3111, 3085, 3058, 3029, 3000, 2971,
                2940, 2908, 2876, 2843, 2809, 2774, 2738, 2701, 2663,
                2625, 2586, 2545, 2504, 2463, 2420, 2376, 2332, 2286,
                2240, 2193, 2145, 2096, 2046, 1996, 1944, 1892, 1839,
                1785, 1730, 1674, 1618, 1560, 1502, 1443, 1383, 1322,
                1260, 1197, 1134, 1069, 1004, 938, 871, 803, 735, 665,
                595, 523, 451, 378, 304, 229, 216, 205, 194, 184, 173, 162,
                151, 140, 130, 119, 108, 97, 86, 76, 65, 54, 43, 32, 22, 11,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            ])

    def testContinuityAndBounds(self):
        # Power must stay within [0, 3500] W and adjacent 240 s samples may
        # differ by less than 80 W (no discontinuities in the curve).
        gen = PvGenerator(TimeOfDay.from_hms(8), TimeOfDay.from_hms(20), 3500)
        last = gen.get_value(TimeOfDay(0))
        for time in generate_360_times():
            power = gen.get_value(time)
            self.assertGreaterEqual(power, 0)
            self.assertLessEqual(power, 3500)
            self.assertLess(abs(power - last), 80)
            last = power
class TestWeatherGeneration(unittest.TestCase):
    """Tests for the weather() noise generator (libpv.pv_generation)."""

    def testEquality(self):
        # Golden-value test with a fixed RNG seed: the first 100 values
        # (scaled by 10 and rounded to 3 places) must match exactly.
        w = weather(0.6, random.Random(4))
        self.assertEqual(
            [round(x * 10, 3) for x in itertools.islice(w, 100)],
            [
                9.201, 9.197, 9.193, 9.19, 9.186, 9.183, 9.179, 9.175, 9.172, 9.168,
                9.165, 9.161, 9.158, 9.154, 9.15, 9.147, 9.143, 9.14, 9.137, 9.133,
                9.13, 9.126, 9.123, 9.119, 9.116, 9.113, 9.109, 9.106, 9.102, 9.099,
                9.096, 9.102, 9.107, 9.113, 9.119, 9.124, 9.13, 9.136, 9.141, 9.147,
                9.152, 9.158, 9.164, 9.169, 9.175, 9.18, 9.186, 9.191, 9.197, 9.202,
                9.208, 9.213, 9.219, 9.224, 9.23, 9.235, 9.24, 9.246, 9.251, 9.257,
                9.253, 9.25, 9.247, 9.243, 9.24, 9.237, 9.234, 9.23, 9.227, 9.224,
                9.221, 9.218, 9.215, 9.211, 9.208, 9.205, 9.202, 9.199, 9.196, 9.193,
                9.19, 9.187, 9.183, 9.18, 9.177, 9.174, 9.171, 9.168, 9.165, 9.162,
                9.159, 9.157, 9.154, 9.151, 9.148, 9.145, 9.142, 9.139, 9.136, 9.133,
            ])

    def testNoiseFactor(self):
        # With noise factor 0.4, every generated value must stay in
        # [1 - 0.4, 1] = [0.6, 1]; checked over 200k samples.
        w = weather(0.4, random.Random(3))
        for n in itertools.islice(w, 200_000):
            self.assertLessEqual(0.6, n)
            self.assertLessEqual(n, 1)
if __name__ == '__main__':
unittest.main()
| 47.915789
| 85
| 0.504174
| 756
| 4,552
| 2.997355
| 0.345238
| 0.126214
| 0.186673
| 0.245366
| 0.172551
| 0.114298
| 0.114298
| 0.114298
| 0.114298
| 0.114298
| 0
| 0.450839
| 0.331942
| 4,552
| 94
| 86
| 48.425532
| 0.294311
| 0.010765
| 0
| 0.175
| 0
| 0
| 0.001777
| 0
| 0
| 0
| 0
| 0
| 0.0875
| 1
| 0.0625
| false
| 0
| 0.0375
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d886d8c696a1ebb65d655949115462c4ca60e2a4
| 274
|
py
|
Python
|
vmraid/patches/v6_0/document_type_rename.py
|
sowrisurya/vmraid
|
f833e00978019dad87af80b41279c0146c063ed5
|
[
"MIT"
] | null | null | null |
vmraid/patches/v6_0/document_type_rename.py
|
sowrisurya/vmraid
|
f833e00978019dad87af80b41279c0146c063ed5
|
[
"MIT"
] | null | null | null |
vmraid/patches/v6_0/document_type_rename.py
|
sowrisurya/vmraid
|
f833e00978019dad87af80b41279c0146c063ed5
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import vmraid
def execute():
	"""v6.0 patch: rename DocType.document_type classifier values.

	Rewrites all tabDocType rows: 'Transaction' -> 'Document' and
	'Master' -> 'Setup'.
	"""
	vmraid.db.sql("""update tabDocType set document_type='Document'
		where document_type='Transaction'""")
	vmraid.db.sql("""update tabDocType set document_type='Setup'
		where document_type='Master'""")
| 30.444444
| 64
| 0.766423
| 35
| 274
| 5.742857
| 0.542857
| 0.238806
| 0.109453
| 0.169154
| 0.41791
| 0.41791
| 0.41791
| 0.41791
| 0
| 0
| 0
| 0
| 0.105839
| 274
| 8
| 65
| 34.25
| 0.820408
| 0
| 0
| 0
| 0
| 0
| 0.569343
| 0.343066
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| true
| 0
| 0.285714
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d8b1c38d182c0ed927319fd435a35f4a6f89a701
| 162
|
py
|
Python
|
saleor/webhook/observability/exceptions.py
|
DevPoke/saleor
|
ced3a2249a18031f9f593e71d1d18aa787ec1060
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/webhook/observability/exceptions.py
|
DevPoke/saleor
|
ced3a2249a18031f9f593e71d1d18aa787ec1060
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/webhook/observability/exceptions.py
|
DevPoke/saleor
|
ced3a2249a18031f9f593e71d1d18aa787ec1060
|
[
"CC-BY-4.0"
] | null | null | null |
class ObservabilityError(Exception):
    """Base class for all errors raised by the observability subsystem."""
class ConnectionNotConfigured(ObservabilityError):
    """Observability error signalling a missing/unconfigured connection."""
class TruncationError(ObservabilityError):
    """Observability error signalling a payload truncation failure."""
| 14.727273
| 50
| 0.790123
| 12
| 162
| 10.666667
| 0.5
| 0.140625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154321
| 162
| 10
| 51
| 16.2
| 0.934307
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
d8d3a26759da05b4ac662c9a2c7f47a9af51214a
| 166
|
py
|
Python
|
site_repo/utils/requests.py
|
Aviah/one-click-django-server
|
ddce7181f025b7f8d0979d725f85f8124add6adf
|
[
"MIT"
] | 10
|
2016-03-22T22:14:40.000Z
|
2021-07-23T22:00:02.000Z
|
site_repo/utils/requests.py
|
Aviah/one-click-django-server
|
ddce7181f025b7f8d0979d725f85f8124add6adf
|
[
"MIT"
] | null | null | null |
site_repo/utils/requests.py
|
Aviah/one-click-django-server
|
ddce7181f025b7f8d0979d725f85f8124add6adf
|
[
"MIT"
] | 4
|
2016-04-05T05:41:15.000Z
|
2017-01-08T10:03:25.000Z
|
# requests utils
def get_ip(request):
    """Return the client IP address for a Django-style request.

    Prefers the ``HTTP_X_FORWARDED_FOR`` header (set by proxies) over
    ``REMOTE_ADDR``.  X-Forwarded-For may carry a comma-separated chain
    ("client, proxy1, proxy2"); the originating client is the first entry,
    so only that entry is returned (the original code returned the whole
    chain).

    NOTE: X-Forwarded-For is client-controlled; trust it only behind a
    proxy that sanitises the header.
    """
    ip = request.META['REMOTE_ADDR']
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR', '')
    if forwarded:
        # First entry in the proxy chain is the originating client.
        ip = forwarded.split(',')[0].strip()
    return ip
| 16.6
| 52
| 0.596386
| 22
| 166
| 4.272727
| 0.636364
| 0.287234
| 0.276596
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.295181
| 166
| 10
| 53
| 16.6
| 0.803419
| 0.084337
| 0
| 0
| 0
| 0
| 0.205298
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2b17cbd8a9488937054f8a24306f05f58eb7a8b6
| 2,757
|
py
|
Python
|
micromath/fibonacci/test_fibonacci.py
|
hedrox/micromath
|
0f300da914c844e5ff0775f25119909f748de635
|
[
"MIT"
] | null | null | null |
micromath/fibonacci/test_fibonacci.py
|
hedrox/micromath
|
0f300da914c844e5ff0775f25119909f748de635
|
[
"MIT"
] | null | null | null |
micromath/fibonacci/test_fibonacci.py
|
hedrox/micromath
|
0f300da914c844e5ff0775f25119909f748de635
|
[
"MIT"
] | null | null | null |
import logging
import json
from server import app
app.testing = True
logging.disable(logging.ERROR)
class TestFibonacci:
    """API tests for the /api/v1/fibonacci endpoint of the Flask app.

    The original class repeated the same 7-line "expect a 500
    ValidationError" stanza four times; that is factored into
    ``_assert_validation_error``.  Public test method names are unchanged.
    """

    ENDPOINT = '/api/v1/fibonacci'

    def _assert_validation_error(self, client, body):
        # Expect a 500 response whose JSON payload names a ValidationError.
        result = client.post(self.ENDPOINT, json=body)
        assert result.status_code == 500
        data = json.loads(result.data)
        assert data['result'] is None
        assert 'name' in data['error']
        assert data['error']['name'] == 'ValidationError'

    def test_correct_fibonacci(self):
        with app.test_client() as client:
            body = {'number': 16}
            result = client.post(self.ENDPOINT, json=body)
            assert result.status_code == 200
            data = json.loads(result.data)
            # Service returns 610 for input 16.
            assert int(data['result']) == 610
            assert data['error'] is None

    def test_invalid_attribute_type(self):
        with app.test_client() as client:
            # Wrong types for 'number': null and a numeric string.
            self._assert_validation_error(client, {'number': None})
            self._assert_validation_error(client, {'number': '2'})

    def test_empty_body(self):
        with app.test_client() as client:
            self._assert_validation_error(client, {})

    def test_extra_attribute(self):
        with app.test_client() as client:
            self._assert_validation_error(client, {'number': 32, 'extra_key': 32})

    def test_no_body(self):
        with app.test_client() as client:
            # No JSON payload at all -> 400 with a specific error message.
            result = client.post(self.ENDPOINT)
            assert result.status_code == 400
            data = json.loads(result.data)
            assert data['result'] is None
            assert data['error'] == 'Data not provided'

    def test_invalid_api_version(self):
        with app.test_client() as client:
            body = {'number': 32}
            result = client.post('/api/v2/fibonacci', json=body)
            assert result.status_code == 404
            data = json.loads(result.data)
            assert data['result'] is None
            assert data['error'] == 'API version v2 not found'
| 33.621951
| 64
| 0.564019
| 322
| 2,757
| 4.742236
| 0.177019
| 0.085134
| 0.073346
| 0.087099
| 0.787164
| 0.773412
| 0.734774
| 0.709234
| 0.663392
| 0.612312
| 0
| 0.021602
| 0.311571
| 2,757
| 81
| 65
| 34.037037
| 0.782929
| 0
| 0
| 0.571429
| 0
| 0
| 0.141095
| 0
| 0
| 0
| 0
| 0
| 0.396825
| 1
| 0.095238
| false
| 0
| 0.047619
| 0
| 0.15873
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2b240ebab74f79c8c466f67dcad71acc3ab5e267
| 67
|
py
|
Python
|
slack_bolt/context/respond/__init__.py
|
hirosassa/bolt-python
|
befc3a1463f3ac8dbb780d66decc304e2bdf3e7a
|
[
"MIT"
] | 504
|
2020-08-07T05:02:57.000Z
|
2022-03-31T14:32:46.000Z
|
slack_bolt/context/respond/__init__.py
|
hirosassa/bolt-python
|
befc3a1463f3ac8dbb780d66decc304e2bdf3e7a
|
[
"MIT"
] | 560
|
2020-08-07T01:16:06.000Z
|
2022-03-30T00:40:56.000Z
|
slack_bolt/context/respond/__init__.py
|
hirosassa/bolt-python
|
befc3a1463f3ac8dbb780d66decc304e2bdf3e7a
|
[
"MIT"
] | 150
|
2020-08-07T09:41:14.000Z
|
2022-03-30T04:54:51.000Z
|
# Don't add async module imports here
from .respond import Respond
| 22.333333
| 37
| 0.791045
| 11
| 67
| 4.818182
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164179
| 67
| 2
| 38
| 33.5
| 0.946429
| 0.522388
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
2b32885657370cb4b8b7d916d4f131943c631e50
| 421
|
py
|
Python
|
fexr/fexr.py
|
fexrapis/pyfexr
|
2273f29c9a828722d546e9f7a6cbbc6f50987786
|
[
"MIT"
] | null | null | null |
fexr/fexr.py
|
fexrapis/pyfexr
|
2273f29c9a828722d546e9f7a6cbbc6f50987786
|
[
"MIT"
] | null | null | null |
fexr/fexr.py
|
fexrapis/pyfexr
|
2273f29c9a828722d546e9f7a6cbbc6f50987786
|
[
"MIT"
] | null | null | null |
import requests
import json
def initialize_app(config):
    """Create and return a :class:`Fexr` client built from *config*."""
    return Fexr(config)
class Fexr:
    """interface"""

    def __init__(self, config):
        # Bot-account credentials; raises KeyError if any key is missing.
        self.botEmail = config["botEmail"]
        self.botPassword = config["botPassword"]
        self.usageId = config["usageId"]
        # NOTE(review): the passed-in config dict is discarded here and
        # self.config is set to None — confirm this is intentional.
        self.config = None
        # Reused HTTP session (connection pooling across requests).
        self.requests = requests.Session()

    def core(self):
        # NOTE(review): `self.api_key` is never assigned in __init__ and
        # `Core` is not defined in this module's visible scope — as written
        # this call raises at runtime; verify against the rest of the package.
        return Core(self.api_key, self.requests)
| 21.05
| 48
| 0.638955
| 47
| 421
| 5.595745
| 0.446809
| 0.076046
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.244656
| 421
| 19
| 49
| 22.157895
| 0.827044
| 0.021378
| 0
| 0
| 0
| 0
| 0.064039
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.230769
| false
| 0.076923
| 0.153846
| 0.153846
| 0.615385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
2b61ffc6917a5f272ca7c83fd65079c3be07964c
| 3,797
|
py
|
Python
|
pcraster/pcraster-4.2.0/pcraster-4.2.0/source/python_modelling_framework/UnitTests/pfTestModels.py
|
quanpands/wflow
|
b454a55e4a63556eaac3fbabd97f8a0b80901e5a
|
[
"MIT"
] | null | null | null |
pcraster/pcraster-4.2.0/pcraster-4.2.0/source/python_modelling_framework/UnitTests/pfTestModels.py
|
quanpands/wflow
|
b454a55e4a63556eaac3fbabd97f8a0b80901e5a
|
[
"MIT"
] | null | null | null |
pcraster/pcraster-4.2.0/pcraster-4.2.0/source/python_modelling_framework/UnitTests/pfTestModels.py
|
quanpands/wflow
|
b454a55e4a63556eaac3fbabd97f8a0b80901e5a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import pcraster
import pcraster.framework.dynamicPCRasterBase as dynamicPCRasterBase
import pcraster.framework.mcPCRasterBase as mcPCRasterBase
import pcraster.framework.pfPCRasterBase as pfPCRasterBase
import pcraster.framework.staticPCRasterBase as staticPCRasterBase
class StaticWithoutAll(staticPCRasterBase.StaticModel, mcPCRasterBase.MonteCarloModel):
    # Test fixture: static Monte Carlo model that implements only initial();
    # presumably used by the framework unit tests to detect missing
    # updateWeight/suspend/resume hooks.
    def __init__(self):
        staticPCRasterBase.StaticModel.__init__(self)
        mcPCRasterBase.MonteCarloModel.__init__(self)

    def initial(self):
        pass
class DynamicWithoutAll(dynamicPCRasterBase.DynamicModel, mcPCRasterBase.MonteCarloModel):
    # Test fixture: dynamic Monte Carlo model with only initial() defined
    # (no updateWeight/suspend/resume).
    def __init__(self):
        dynamicPCRasterBase.DynamicModel.__init__(self)
        mcPCRasterBase.MonteCarloModel.__init__(self)

    def initial(self):
        pass
class StaticWithoutSuspend(staticPCRasterBase.StaticModel, mcPCRasterBase.MonteCarloModel):
    # Test fixture: defines initial() and updateWeight() but no
    # suspend()/resume().
    def __init__(self):
        staticPCRasterBase.StaticModel.__init__(self)
        mcPCRasterBase.MonteCarloModel.__init__(self)

    def initial(self):
        pass

    def updateWeight(self):
        pass
class DynamicWithoutSuspend(dynamicPCRasterBase.DynamicModel, mcPCRasterBase.MonteCarloModel):
    # Test fixture: dynamic variant with initial() and updateWeight() but no
    # suspend()/resume().
    def __init__(self):
        dynamicPCRasterBase.DynamicModel.__init__(self)
        mcPCRasterBase.MonteCarloModel.__init__(self)

    def initial(self):
        pass

    def updateWeight(self):
        pass
class StaticWithoutResume(mcPCRasterBase.MonteCarloModel):
    # Test fixture: implements everything except resume().
    # NOTE(review): despite the name it inherits only MonteCarloModel (not
    # StaticModel) and __init__ skips the base initializer — confirm intended.
    def __init__(self):
        pass

    def initial(self):
        pass

    def updateWeight(self):
        pass

    def suspend(self):
        pass
class DynamicWithoutResume(mcPCRasterBase.MonteCarloModel):
    # Test fixture: implements everything except resume().
    # NOTE(review): inherits only MonteCarloModel (not DynamicModel) and
    # skips the base initializer — confirm intended.
    def __init__(self):
        pass

    def initial(self):
        pass

    def updateWeight(self):
        pass

    def suspend(self):
        pass
#
class T0(mcPCRasterBase.MonteCarloModel):
    # Minimal fixture: only __init__, which skips the base initializer.
    def __init__(self):
        pass
#
class T1(mcPCRasterBase.MonteCarloModel):
    # Minimal fixture, identical in shape to T0.
    def __init__(self):
        pass
#
class staticModel(mcPCRasterBase.MonteCarloModel):
    # Working static Monte Carlo fixture that reports maps before, during
    # and after the MC loop.
    # NOTE(review): inherits only MonteCarloModel but also calls
    # StaticModel.__init__ — presumably deliberate for the test; confirm.
    def __init__(self):
        mcPCRasterBase.MonteCarloModel.__init__(self)
        staticPCRasterBase.StaticModel.__init__(self)
        pcraster.setclone("clone.map")
        self.newmap = pcraster.readmap("clone.map")

    def initial(self):
        # One report per Monte Carlo sample ("mcsi<N>").
        name = "mcsi%d" % (self.currentSampleNumber())
        self.report(self.newmap, name)

    def premcloop(self):
        # Runs once before the MC loop; writes a map per sample ("premc<N>").
        for sample in self.sampleNumbers():
            name = "premc%d" % (sample)
            self.report(self.newmap, name)

    def postmcloop(self):
        # Runs once after the MC loop; writes a map per sample ("postmc<N>").
        for sample in self.sampleNumbers():
            name = "postmc%d" % (sample)
            self.report(self.newmap, name)
#
class DynamicModel(dynamicPCRasterBase.DynamicModel, mcPCRasterBase.MonteCarloModel, pfPCRasterBase.ParticleFilterModel):
    # Complete particle-filter fixture: implements every hook
    # (initial/dynamic/premcloop/postmcloop/updateWeight/suspend/resume).
    def __init__(self):
        dynamicPCRasterBase.DynamicModel.__init__(self)
        mcPCRasterBase.MonteCarloModel.__init__(self)
        pfPCRasterBase.ParticleFilterModel.__init__(self)
        pcraster.setclone("clone.map")
        self.newmap = pcraster.readmap("clone.map")

    def initial(self):
        # Report per sample and remember the sample number so that
        # suspend()/resume() can verify state consistency later.
        name = "mcdi%d" % (self.currentSampleNumber())
        self.report(self.newmap, name)
        self.stateVar = self.currentSampleNumber()

    def premcloop(self):
        # One map per (sample, timestep) pair before the MC loop.
        for sample in self.sampleNumbers():
            for timestep in self.timeSteps():
                name = "premc_%d_%d" % (sample, timestep)
                self.report("clone.map", name)

    def postmcloop(self):
        # One map per (sample, timestep) pair after the MC loop.
        for sample in self.sampleNumbers():
            for timestep in self.timeSteps():
                name = "postmc_%d_%d" % (sample, timestep)
                self.report("clone.map", name)

    def dynamic(self):
        name = "mcdd%d" % (self.currentSampleNumber())
        self.report("clone.map", name)

    def updateWeight(self):
        # Random particle weight in [0, 1).
        return random.random()

    def suspend(self):
        # State must still belong to the current sample.
        assert self.stateVar == self.currentSampleNumber()

    def resume(self):
        assert self.stateVar == self.currentSampleNumber()
| 27.316547
| 121
| 0.738214
| 388
| 3,797
| 6.976804
| 0.162371
| 0.067972
| 0.040635
| 0.11969
| 0.761729
| 0.738825
| 0.653122
| 0.592538
| 0.536387
| 0.514592
| 0
| 0.000936
| 0.156176
| 3,797
| 138
| 122
| 27.514493
| 0.843945
| 0.011061
| 0
| 0.733333
| 0
| 0
| 0.03175
| 0
| 0
| 0
| 0
| 0
| 0.019048
| 1
| 0.304762
| false
| 0.152381
| 0.057143
| 0.009524
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
996276e35632e5351e97d09b376c527fbb524a3d
| 1,409
|
py
|
Python
|
alpa/torch/optim/adam.py
|
TarzanZhao/alpa
|
c477805bfb182788ed07e8b3a8e2924333433507
|
[
"Apache-2.0"
] | null | null | null |
alpa/torch/optim/adam.py
|
TarzanZhao/alpa
|
c477805bfb182788ed07e8b3a8e2924333433507
|
[
"Apache-2.0"
] | null | null | null |
alpa/torch/optim/adam.py
|
TarzanZhao/alpa
|
c477805bfb182788ed07e8b3a8e2924333433507
|
[
"Apache-2.0"
] | null | null | null |
"""Adam optimizer"""
import copy
import torch
def adam(lr=1e-4):
    """Build a functional (out-of-place-style) optimizer factory.

    ``adam(lr)`` returns ``optim_gen``; applying that to a ``params`` dict
    yields a triple ``(optim_func, optim_state_init_func, optim_state)``:

    - ``optim_func(params, optim_state, params_grad)`` ->
      ``(params, optim_state)`` after one update step
    - ``optim_state_init_func(optim_state)`` -> zero-initialized state dict
    - ``optim_state`` -> tracked (shape-only) optimizer state

    The implementation has no in-place torch ops and no data-dependent
    control flow.

    TODO FIXME: properly implement the Adam algorithm — the current update
    is a placeholder (``p + g * lr`` plus a per-key step counter as state).
    """
    def optim_gen(params):
        def optim_func(params, optim_state, params_grad):
            # Placeholder step: shift each parameter by grad * lr and bump
            # the per-key counter held in optim_state.
            for key in params:
                params[key] = params[key] + params_grad[key] * lr
                optim_state[key] = optim_state[key] + 1
            return params, optim_state

        def optim_state_init_func(optim_state):
            # Fresh zero tensor matching each tracked entry's shape/dtype.
            return {key: torch.full_like(value, 0.0)
                    for key, value in optim_state.items()}

        return optim_func, optim_state_init_func, copy.deepcopy(params)

    return optim_gen
| 32.022727
| 77
| 0.596167
| 173
| 1,409
| 4.653179
| 0.369942
| 0.198758
| 0.079503
| 0.067081
| 0.18882
| 0.069565
| 0
| 0
| 0
| 0
| 0
| 0.005187
| 0.315827
| 1,409
| 43
| 78
| 32.767442
| 0.829876
| 0.457062
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 0
| 1
| 0.235294
| false
| 0
| 0.117647
| 0
| 0.588235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
99a7afbbb47204c705de2e7ed32e0ae9dbea9c39
| 62
|
py
|
Python
|
hello_world.py
|
deepti-anand/hello-world
|
494fc16c416dc8fe25600d5830052343b70f2070
|
[
"Apache-2.0"
] | null | null | null |
hello_world.py
|
deepti-anand/hello-world
|
494fc16c416dc8fe25600d5830052343b70f2070
|
[
"Apache-2.0"
] | null | null | null |
hello_world.py
|
deepti-anand/hello-world
|
494fc16c416dc8fe25600d5830052343b70f2070
|
[
"Apache-2.0"
] | null | null | null |
# Emit the three demo greetings; stdout is identical to the original script.
for greeting in ("hi", "Hello World", "hellooo"):
    print(greeting)
| 20.666667
| 33
| 0.693548
| 9
| 62
| 4.777778
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080645
| 62
| 3
| 34
| 20.666667
| 0.754386
| 0.16129
| 0
| 0
| 0
| 0
| 0.392157
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
99a8f1f1c93af08f3ec765e04d13a2b8d33dbbbd
| 84
|
py
|
Python
|
hello.py
|
bingle2400/cs3240-labdemo
|
5dca6fd8f4c510aa5b06d7dd17e5204e0cc13ffc
|
[
"MIT"
] | null | null | null |
hello.py
|
bingle2400/cs3240-labdemo
|
5dca6fd8f4c510aa5b06d7dd17e5204e0cc13ffc
|
[
"MIT"
] | null | null | null |
hello.py
|
bingle2400/cs3240-labdemo
|
5dca6fd8f4c510aa5b06d7dd17e5204e0cc13ffc
|
[
"MIT"
] | null | null | null |
from helper import reeting
if __name__ == "__main__":
reeting("hello")
| 9.333333
| 26
| 0.630952
| 9
| 84
| 5
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.261905
| 84
| 8
| 27
| 10.5
| 0.725806
| 0
| 0
| 0
| 0
| 0
| 0.158537
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
41da78eeb82897161300840746eef404c63146f0
| 7,485
|
py
|
Python
|
test_farbfeld.py
|
jmp/farbfeld
|
08cb56957624cfe3a1fe872390aa3252226e57cd
|
[
"MIT"
] | 2
|
2020-02-01T14:59:15.000Z
|
2021-11-03T12:39:10.000Z
|
test_farbfeld.py
|
jmp/farbfeld
|
08cb56957624cfe3a1fe872390aa3252226e57cd
|
[
"MIT"
] | 5
|
2019-01-26T11:58:58.000Z
|
2019-08-31T08:16:59.000Z
|
test_farbfeld.py
|
jmp/farbfeld
|
08cb56957624cfe3a1fe872390aa3252226e57cd
|
[
"MIT"
] | null | null | null |
# pylint: disable=missing-docstring
import io
import unittest
import farbfeld
class ReadTest(unittest.TestCase):
    """Tests for farbfeld.read: header validation, dimension handling and
    pixel decoding.

    Layout of a farbfeld stream: 8-byte magic b'farbfeld', 32-bit
    big-endian width and height, then width*height pixels of four 16-bit
    big-endian components (RGBA).
    """

    def test_read_empty_data(self):
        # No bytes at all -> invalid.
        self.assertRaises(
            farbfeld.InvalidFormat,
            farbfeld.read,
            io.BytesIO(b''),
        )

    def test_read_header_only(self):
        # Magic present but width/height missing -> invalid.
        self.assertRaises(
            farbfeld.InvalidFormat,
            farbfeld.read,
            io.BytesIO(b'farbfeld'),
        )

    def test_read_wrong_header_no_data(self):
        # Reversed magic -> invalid.
        self.assertRaises(
            farbfeld.InvalidFormat,
            farbfeld.read,
            io.BytesIO(b'dlefbraf'),
        )

    def test_read_correct_data_wrong_header(self):
        # Well-formed body but wrong magic must still be rejected.
        self.assertRaises(farbfeld.InvalidFormat, farbfeld.read, io.BytesIO(
            b'dlefbraf'  # magic
            b'\x00\x00\x00\x01'  # width
            b'\x00\x00\x00\x01'  # height
            b'\x01\x02\x03\x04\x05\x06\x07\x08'  # RGBA
        ))

    def test_read_valid_but_no_pixels(self):
        # 0x0 image is valid and decodes to an empty list.
        pixels = farbfeld.read(io.BytesIO(
            b'farbfeld'  # magic
            b'\x00\x00\x00\x00'  # width
            b'\x00\x00\x00\x00'  # height
        ))
        self.assertListEqual([], pixels)

    def test_read_valid_but_too_few_pixels(self):
        # Header promises 1x2 pixels but only one is present.
        self.assertRaises(
            farbfeld.InvalidFormat,
            farbfeld.read,
            io.BytesIO(
                b'farbfeld'  # magic
                b'\x00\x00\x00\x01'  # width
                b'\x00\x00\x00\x02'  # height
                b'\xff\xff\xff\xff\xff\xff\xff\xff'  # RGBA
            ),
        )

    def test_read_valid_but_too_many_pixels(self):
        # Header promises 1x1 pixels but two are present.
        self.assertRaises(
            farbfeld.InvalidFormat,
            farbfeld.read,
            io.BytesIO(
                b'farbfeld'  # magic
                b'\x00\x00\x00\x01'  # width
                b'\x00\x00\x00\x01'  # height
                b'\xff\xff\xff\xff\xff\xff\xff\xff'  # RGBA
                b'\xff\xff\xff\xff\xff\xff\xff\xff'  # RGBA
            ),
        )

    def test_read_zero_width(self):
        # width=0 with nonzero height -> no pixel data expected.
        pixels = farbfeld.read(io.BytesIO(
            b'farbfeld'  # magic
            b'\x00\x00\x00\x00'  # width
            b'\x00\x00\x00\x01'  # height
        ))
        self.assertListEqual([], pixels)

    def test_read_zero_height(self):
        # height=0 with nonzero width -> no pixel data expected.
        pixels = farbfeld.read(io.BytesIO(
            b'farbfeld'  # magic
            b'\x00\x00\x00\x01'  # width
            b'\x00\x00\x00\x00'  # height
        ))
        self.assertListEqual([], pixels)

    def test_read_incomplete_pixel(self):
        # Only 7 of 8 pixel bytes present -> invalid.
        self.assertRaises(
            farbfeld.InvalidFormat,
            farbfeld.read,
            io.BytesIO(
                b'farbfeld'  # magic
                b'\x00\x00\x00\x01'  # width
                b'\x00\x00\x00\x01'  # height
                b'\x00\x20\x00\x40\x00\x80\x00'  # RGBA
            ),
        )

    def test_read_single_pixel(self):
        # 16-bit big-endian components 0x0020/0x0040/0x0080/0x00ff decode
        # to [32, 64, 128, 255].
        pixels = farbfeld.read(io.BytesIO(
            b'farbfeld'  # magic
            b'\x00\x00\x00\x01'  # width
            b'\x00\x00\x00\x01'  # height
            b'\x00\x20\x00\x40\x00\x80\x00\xff'  # RGBA
        ))
        self.assertListEqual([[[32, 64, 128, 255]]], pixels)

    def test_read_two_by_two(self):
        # Row-major 2x2 image: result is rows of pixels of components.
        pixels = farbfeld.read(io.BytesIO(
            b'farbfeld'  # magic
            b'\x00\x00\x00\x02'  # width
            b'\x00\x00\x00\x02'  # height
            b'\x00\x01\x00\x02\x00\x03\x00\x04'  # RGBA
            b'\x00\x05\x00\x06\x00\x07\x00\x08'  # RGBA
            b'\x00\x09\x00\x0a\x00\x0b\x00\x0c'  # RGBA
            b'\x00\x0d\x00\x0e\x00\x0f\x00\x10'  # RGBA
        ))
        self.assertListEqual([
            [[1, 2, 3, 4], [5, 6, 7, 8]],
            [[9, 10, 11, 12], [13, 14, 15, 16]],
        ], pixels)
class WriteTest(unittest.TestCase):
    """Tests for farbfeld.write: input validation and encoded output."""

    def test_write_invalid_data(self):
        # None is not a pixel grid.
        self.assertRaises(ValueError, farbfeld.write, io.BytesIO(), None)

    def test_write_zero_height(self):
        # Empty list -> header only, width=0 height=0.
        file = io.BytesIO()
        farbfeld.write(file, [])
        file.seek(0)
        self.assertEqual(
            file.read(),
            b'farbfeld'  # magic
            b'\x00\x00\x00\x00'  # width
            b'\x00\x00\x00\x00'  # height
        )

    def test_write_zero_width(self):
        # One empty row -> width=0, height=1, no pixel data.
        file = io.BytesIO()
        farbfeld.write(file, [[]])
        file.seek(0)
        self.assertEqual(
            file.read(),
            b'farbfeld'  # magic
            b'\x00\x00\x00\x00'  # width
            b'\x00\x00\x00\x01'  # height
        )

    def test_write_incomplete_pixels(self):
        # A pixel with zero components is rejected.
        self.assertRaises(ValueError, farbfeld.write, io.BytesIO(), [[[]]])

    def test_write_too_few_components(self):
        # Pixels must have exactly four components (RGBA); three is too few.
        self.assertRaises(
            ValueError,
            farbfeld.write,
            io.BytesIO(),
            [[[1, 2, 3]]],
        )

    def test_write_too_many_components(self):
        # Five components is too many.
        self.assertRaises(
            ValueError,
            farbfeld.write,
            io.BytesIO(),
            [[[1, 2, 3, 4, 5]]],
        )

    def test_write_component_out_of_range(self):
        # Valid range is [0, 65535]: -1 and 65536 are both rejected.
        self.assertRaises(
            ValueError,
            farbfeld.write,
            io.BytesIO(),
            [[[0, 0, 0, -1]]],
        )
        self.assertRaises(
            ValueError,
            farbfeld.write,
            io.BytesIO(),
            [[[0, 0, 0, 65536]]],
        )

    def test_write_component_within_range(self):
        # Boundary and mid-range values must be accepted without error.
        try:
            farbfeld.write(io.BytesIO(), [[[0, 0, 0, 0]]])
            farbfeld.write(io.BytesIO(), [[[32767, 32767, 32767, 32767]]])
            farbfeld.write(io.BytesIO(), [[[65535, 65535, 65535, 65535]]])
        except ValueError:
            self.fail('ValueError raised unexpectedly')

    def test_write_invalid_component(self):
        # Non-integer component values (float, str, None) are rejected.
        self.assertRaises(
            ValueError,
            farbfeld.write,
            io.BytesIO(),
            [[[0, 0, 0, 0.5]]],
        )
        self.assertRaises(
            ValueError,
            farbfeld.write,
            io.BytesIO(),
            [[[0, 0, 0, '1']]],
        )
        self.assertRaises(
            ValueError,
            farbfeld.write,
            io.BytesIO(),
            [[[0, 0, 0, None]]],
        )

    def test_write_inconsistent_width(self):
        # All rows must have the same length.
        self.assertRaises(ValueError, farbfeld.write, io.BytesIO(), [[
            [0, 0, 0, 0], [0, 0, 0, 0],  # first row, two pixels
        ], [
            [0, 0, 0, 0],  # second row, only one pixel
        ]])

    def test_write_single_pixel(self):
        # Components are encoded as 16-bit big-endian values.
        file = io.BytesIO()
        farbfeld.write(file, [[[32, 64, 128, 255]]])
        file.seek(0)
        self.assertEqual(
            file.read(),
            b'farbfeld'  # magic
            b'\x00\x00\x00\x01'  # width
            b'\x00\x00\x00\x01'  # height
            b'\x00\x20\x00\x40\x00\x80\x00\xff'  # RGBA
        )

    def test_write_two_by_two(self):
        # Row-major encoding of a 2x2 image.
        file = io.BytesIO()
        farbfeld.write(file, [
            [[1, 2, 3, 4], [5, 6, 7, 8]],
            [[9, 10, 11, 12], [13, 14, 15, 16]],
        ])
        file.seek(0)
        self.assertEqual(
            file.read(),
            b'farbfeld'  # magic
            b'\x00\x00\x00\x02'  # width
            b'\x00\x00\x00\x02'  # height
            b'\x00\x01\x00\x02\x00\x03\x00\x04'  # RGBA
            b'\x00\x05\x00\x06\x00\x07\x00\x08'  # RGBA
            b'\x00\x09\x00\x0a\x00\x0b\x00\x0c'  # RGBA
            b'\x00\x0d\x00\x0e\x00\x0f\x00\x10'  # RGBA
        )
| 30.303644
| 76
| 0.501536
| 871
| 7,485
| 4.205511
| 0.129736
| 0.096642
| 0.081081
| 0.07098
| 0.754573
| 0.754027
| 0.741742
| 0.712258
| 0.675949
| 0.675949
| 0
| 0.112829
| 0.360588
| 7,485
| 246
| 77
| 30.426829
| 0.652528
| 0.053975
| 0
| 0.62844
| 0
| 0
| 0.148512
| 0.067777
| 0
| 0
| 0
| 0
| 0.119266
| 1
| 0.110092
| false
| 0
| 0.013761
| 0
| 0.133028
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
510589bac3c231ff9dfff00d74d2b2a73850d0b7
| 934
|
py
|
Python
|
release/stubs.min/Tekla/Structures/ModelInternal_parts/dotTemporaryStatesEnum.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/Tekla/Structures/ModelInternal_parts/dotTemporaryStatesEnum.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/Tekla/Structures/ModelInternal_parts/dotTemporaryStatesEnum.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
class dotTemporaryStatesEnum(Enum):
    """ enum dotTemporaryStatesEnum,values: DOT_TEMPORARY_STATE_ACCEPTED (8),DOT_TEMPORARY_STATE_ACTIVE (6),DOT_TEMPORARY_STATE_DELETED (3),DOT_TEMPORARY_STATE_DM_ONGOING (4),DOT_TEMPORARY_STATE_MODIFIED (2),DOT_TEMPORARY_STATE_NEW (1),DOT_TEMPORARY_STATE_ORIGINAL (7),DOT_TEMPORARY_STATE_REJECTED (9),DOT_TEMPORARY_STATE_UNCHANGED (5),DOT_TEMPORARY_STATE_UNKNOWN (0),DOT_TEMPORARY_STATE_USE_EXISTING_REPRESENTATION (10) """
    # Auto-generated IronPython stub: every member is declared as None for
    # static analysis only; the numeric values listed in the docstring are
    # supplied at runtime by the underlying .NET enum.
    DOT_TEMPORARY_STATE_ACCEPTED = None
    DOT_TEMPORARY_STATE_ACTIVE = None
    DOT_TEMPORARY_STATE_DELETED = None
    DOT_TEMPORARY_STATE_DM_ONGOING = None
    DOT_TEMPORARY_STATE_MODIFIED = None
    DOT_TEMPORARY_STATE_NEW = None
    DOT_TEMPORARY_STATE_ORIGINAL = None
    DOT_TEMPORARY_STATE_REJECTED = None
    DOT_TEMPORARY_STATE_UNCHANGED = None
    DOT_TEMPORARY_STATE_UNKNOWN = None
    DOT_TEMPORARY_STATE_USE_EXISTING_REPRESENTATION = None
    # Raw integer value accessor provided by the .NET enum machinery.
    value__ = None
| 58.375
| 424
| 0.826552
| 124
| 934
| 5.629032
| 0.274194
| 0.378224
| 0.535817
| 0.30086
| 0.194842
| 0.120344
| 0
| 0
| 0
| 0
| 0
| 0.01451
| 0.114561
| 934
| 15
| 425
| 62.266667
| 0.829504
| 0.441113
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
5147aa629fe9091066db83d5ac4d5067803255bc
| 359
|
py
|
Python
|
src/dev.py
|
Inigoperez/Proyecto_Interfaces
|
4a2b98cd499fabf3789301e9eb488297bebfcf2a
|
[
"MIT"
] | null | null | null |
src/dev.py
|
Inigoperez/Proyecto_Interfaces
|
4a2b98cd499fabf3789301e9eb488297bebfcf2a
|
[
"MIT"
] | null | null | null |
src/dev.py
|
Inigoperez/Proyecto_Interfaces
|
4a2b98cd499fabf3789301e9eb488297bebfcf2a
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
    # Executed directly as a script: load variables from a local .env file
    # and use absolute imports.
    from dotenv import load_dotenv
    load_dotenv()
    from config import StageConfig
    from util import route
    from util.commands import commands
else:
    # Imported as a package module: use relative imports; presumably the
    # hosting process supplies the environment (no .env loading here).
    from .config import StageConfig
    from .util import route
    from .util.commands import commands
# Build the application with the staging config and register its commands.
app = route(config=StageConfig, name=__name__)
commands(app=app)
| 22.4375
| 46
| 0.732591
| 46
| 359
| 5.413043
| 0.304348
| 0.128514
| 0.128514
| 0.216867
| 0.610442
| 0.610442
| 0.610442
| 0.610442
| 0.610442
| 0.610442
| 0
| 0
| 0.206128
| 359
| 15
| 47
| 23.933333
| 0.873684
| 0
| 0
| 0
| 0
| 0
| 0.022346
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.583333
| 0
| 0.583333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
5159402d19245aba61bb579f188067d4ee6bc977
| 274
|
py
|
Python
|
vivit/extensions/secondorder/sqrt_ggn/dropout.py
|
PwLo3K46/vivit
|
937642975be2ade122632d4eaef273461992d7ab
|
[
"MIT"
] | 1
|
2021-06-07T05:15:22.000Z
|
2021-06-07T05:15:22.000Z
|
vivit/extensions/secondorder/sqrt_ggn/dropout.py
|
PwLo3K46/vivit
|
937642975be2ade122632d4eaef273461992d7ab
|
[
"MIT"
] | 2
|
2021-08-10T12:45:37.000Z
|
2021-08-10T12:49:51.000Z
|
vivit/extensions/secondorder/sqrt_ggn/dropout.py
|
PwLo3K46/vivit
|
937642975be2ade122632d4eaef273461992d7ab
|
[
"MIT"
] | null | null | null |
from backpack.core.derivatives.dropout import DropoutDerivatives
from vivit.extensions.secondorder.sqrt_ggn.sqrt_ggn_base import SqrtGGNBaseModule
class SqrtGGNDropout(SqrtGGNBaseModule):
    """Square-root GGN extension module for dropout layers.

    Thin wrapper that wires the BackPACK dropout derivatives into the
    square-root GGN base module.
    """

    def __init__(self):
        """Register the dropout derivatives with the base module."""
        dropout_derivatives = DropoutDerivatives()
        super().__init__(derivatives=dropout_derivatives)
| 30.444444
| 81
| 0.821168
| 28
| 274
| 7.642857
| 0.714286
| 0.065421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10219
| 274
| 8
| 82
| 34.25
| 0.869919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
5163521d44aed6af36c7ee8ac7cba90723cfbc26
| 49
|
py
|
Python
|
api/common.py
|
ewangplay/wallet-sdk-py
|
9f55ffa6bb7968192fd88e2b2dddf04c997874ab
|
[
"Apache-2.0"
] | 13
|
2018-03-06T10:04:26.000Z
|
2021-08-12T11:25:29.000Z
|
api/common.py
|
ewangplay/wallet-sdk-py
|
9f55ffa6bb7968192fd88e2b2dddf04c997874ab
|
[
"Apache-2.0"
] | 42
|
2018-02-08T04:32:56.000Z
|
2018-09-06T07:30:58.000Z
|
api/common.py
|
ewangplay/wallet-sdk-py
|
9f55ffa6bb7968192fd88e2b2dddf04c997874ab
|
[
"Apache-2.0"
] | 15
|
2018-02-11T09:29:31.000Z
|
2021-07-04T07:33:22.000Z
|
from cryption.crypto import sign
# API version tag; presumably used as the version segment when building
# wallet-service request URLs — confirm against callers in this package.
VERSION = "v1"
| 12.25
| 32
| 0.755102
| 7
| 49
| 5.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02439
| 0.163265
| 49
| 3
| 33
| 16.333333
| 0.878049
| 0
| 0
| 0
| 0
| 0
| 0.040816
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
5163aaae4b007b773986e024d8629d4d1f39a0b9
| 589
|
py
|
Python
|
tests/unit/test_subscription.py
|
eldarion/braintree_python
|
8be3f69fb9a4171c5e9be049c8440fcc4f79fb40
|
[
"MIT"
] | 3
|
2015-11-05T08:57:12.000Z
|
2016-07-17T18:10:55.000Z
|
tests/unit/test_subscription.py
|
eldarion/braintree_python
|
8be3f69fb9a4171c5e9be049c8440fcc4f79fb40
|
[
"MIT"
] | null | null | null |
tests/unit/test_subscription.py
|
eldarion/braintree_python
|
8be3f69fb9a4171c5e9be049c8440fcc4f79fb40
|
[
"MIT"
] | null | null | null |
from tests.test_helper import *
class TestSubscription(unittest.TestCase):
    """Verifies that Subscription.create/update reject unrecognized keys."""

    # Fix: the original used the Python-2-only `except KeyError, e:` syntax,
    # which is a SyntaxError on Python 3 (PEP 3110), plus the deprecated
    # `assertEquals` alias and a manual `assertTrue(False)` failure marker.
    # `assertRaises` expresses the same intent directly.

    def test_create_raises_exception_with_bad_keys(self):
        """create() must raise KeyError naming the invalid key."""
        with self.assertRaises(KeyError) as ctx:
            Subscription.create({"bad_key": "value"})
        self.assertEqual("'Invalid keys: bad_key'", str(ctx.exception))

    def test_update_raises_exception_with_bad_keys(self):
        """update() must raise KeyError naming the invalid key."""
        with self.assertRaises(KeyError) as ctx:
            Subscription.update("id", {"bad_key": "value"})
        self.assertEqual("'Invalid keys: bad_key'", str(ctx.exception))
| 34.647059
| 64
| 0.633277
| 68
| 589
| 5.235294
| 0.455882
| 0.067416
| 0.106742
| 0.123596
| 0.713483
| 0.713483
| 0.713483
| 0.713483
| 0.460674
| 0.460674
| 0
| 0
| 0.249576
| 589
| 16
| 65
| 36.8125
| 0.80543
| 0
| 0
| 0.571429
| 0
| 0
| 0.122241
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 0
| null | null | 0
| 0.071429
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
5ac3496c6293aef55801f31bfe043059c6610afa
| 467
|
py
|
Python
|
pmaf/biome/survey/_metakit.py
|
mmtechslv/PhyloMAF
|
bab43dd4a4d2812951b1fdf4f1abb83edb79ea88
|
[
"BSD-3-Clause"
] | 1
|
2021-07-02T06:24:17.000Z
|
2021-07-02T06:24:17.000Z
|
pmaf/biome/survey/_metakit.py
|
mmtechslv/PhyloMAF
|
bab43dd4a4d2812951b1fdf4f1abb83edb79ea88
|
[
"BSD-3-Clause"
] | 1
|
2021-06-28T12:02:46.000Z
|
2021-06-28T12:02:46.000Z
|
pmaf/biome/survey/_metakit.py
|
mmtechslv/PhyloMAF
|
bab43dd4a4d2812951b1fdf4f1abb83edb79ea88
|
[
"BSD-3-Clause"
] | null | null | null |
from abc import abstractmethod
from pmaf.biome._metakit import BiomeFeatureMetabase, BiomeSampleMetabase
class BiomeSurveyBackboneMetabase(BiomeFeatureMetabase, BiomeSampleMetabase):
    """Abstract interface for biome survey backbones.

    Combines the feature-side and sample-side metabase contracts and adds
    the survey-specific abstract members below; concrete survey classes
    must implement all of them.
    """

    @abstractmethod
    def to_assembly(self):
        """Convert this survey into an assembly (implementation-defined)."""
        pass

    @property
    @abstractmethod
    def essentials(self):
        """Read-only access to the survey's essentials."""
        pass

    @property
    @abstractmethod
    def assemblies(self):
        """Read-only access to the survey's assemblies."""
        pass

    @property
    @abstractmethod
    def controller(self):
        """Read-only access to the survey's controller."""
        pass
| 19.458333
| 77
| 0.704497
| 39
| 467
| 8.384615
| 0.512821
| 0.207951
| 0.146789
| 0.275229
| 0.302752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.239829
| 467
| 23
| 78
| 20.304348
| 0.921127
| 0
| 0
| 0.611111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0.222222
| 0.111111
| 0
| 0.388889
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
5ae33aa8d62f63600348c0e8b3870cf2f541e67e
| 224
|
py
|
Python
|
translate.py
|
vipul-khatana/Hinglish-Sentiment-Analysis
|
27b0d0aca736194b8f06b8cccb3e68537d91709f
|
[
"MIT"
] | 19
|
2017-12-03T14:17:13.000Z
|
2022-02-23T19:06:07.000Z
|
translate.py
|
vipul-khatana/Hinglish-Sentiment-Analysis
|
27b0d0aca736194b8f06b8cccb3e68537d91709f
|
[
"MIT"
] | 2
|
2018-12-14T12:43:47.000Z
|
2020-03-31T15:27:31.000Z
|
translate.py
|
vipul-khatana/Hinglish-Sentiment-Analysis
|
27b0d0aca736194b8f06b8cccb3e68537d91709f
|
[
"MIT"
] | 23
|
2018-12-14T04:37:47.000Z
|
2022-03-25T09:58:26.000Z
|
# -*- coding: utf-8 -*-
import numpy as np
from googletrans import Translator
# Module-level client, shared by all calls; pinned to the Indian Google
# Translate endpoint.
translator = Translator(service_urls=['translate.google.co.in'])


def translate(word):
    """Translate *word* from Hindi ('hi') to English ('en').

    Returns the googletrans result object for the translation request.
    """
    return translator.translate(word, src='hi', dest='en')
| 22.4
| 64
| 0.727679
| 30
| 224
| 5.4
| 0.766667
| 0.246914
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005076
| 0.120536
| 224
| 9
| 65
| 24.888889
| 0.817259
| 0.09375
| 0
| 0
| 0
| 0
| 0.129353
| 0.109453
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 0.8
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 4
|
851edb7f446fce929abe41c0aa3fb8e6f6bcca40
| 481
|
py
|
Python
|
src/bxgateway/services/gateway_broadcast_service.py
|
blockchain-development-resources/bxgateway
|
761b5085f9c7c6527f0b9aaae06d2f70f3786db2
|
[
"MIT"
] | 1
|
2021-11-26T07:49:24.000Z
|
2021-11-26T07:49:24.000Z
|
src/bxgateway/services/gateway_broadcast_service.py
|
beepool/bxgateway
|
761b5085f9c7c6527f0b9aaae06d2f70f3786db2
|
[
"MIT"
] | null | null | null |
src/bxgateway/services/gateway_broadcast_service.py
|
beepool/bxgateway
|
761b5085f9c7c6527f0b9aaae06d2f70f3786db2
|
[
"MIT"
] | 1
|
2021-09-06T02:10:08.000Z
|
2021-09-06T02:10:08.000Z
|
from bxcommon.connections.abstract_connection import AbstractConnection
from bxcommon.messages.abstract_message import AbstractMessage
from bxcommon.services.broadcast_service import BroadcastService
class GatewayBroadcastService(BroadcastService[AbstractMessage, AbstractConnection]):
    """Broadcast service specialization for gateway nodes.

    Parameterizes the generic BroadcastService with the plain
    AbstractMessage/AbstractConnection types.
    """

    def should_broadcast_to_connection(self, message: AbstractMessage, connection: AbstractConnection) -> bool:
        """Always broadcast: gateways send every message on every connection."""
        # gateway does not really care about network numbers
        return True
| 48.1
| 111
| 0.837838
| 47
| 481
| 8.446809
| 0.659574
| 0.09068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116424
| 481
| 9
| 112
| 53.444444
| 0.934118
| 0.10395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.5
| 0.166667
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 4
|
8527867363759e9aaf795447f6ea1b7805273895
| 476
|
py
|
Python
|
tests/constraints/test_minLength.py
|
vincentchevrier/tableschema-py
|
d47b6d73ab3ea1051c866b93b6e2b9202bbd40b1
|
[
"MIT"
] | 224
|
2017-04-11T11:29:48.000Z
|
2022-03-26T18:34:50.000Z
|
tests/constraints/test_minLength.py
|
vincentchevrier/tableschema-py
|
d47b6d73ab3ea1051c866b93b6e2b9202bbd40b1
|
[
"MIT"
] | 111
|
2017-03-28T19:02:01.000Z
|
2021-12-20T08:42:21.000Z
|
tests/constraints/test_minLength.py
|
vincentchevrier/tableschema-py
|
d47b6d73ab3ea1051c866b93b6e2b9202bbd40b1
|
[
"MIT"
] | 40
|
2017-04-01T08:21:57.000Z
|
2021-02-28T23:52:07.000Z
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from tableschema import constraints
# Tests
# Each case is (minLength constraint, value, expected check result): the
# one-element list satisfies minLength 0 and 1, but not 2.
@pytest.mark.parametrize('constraint, value, result', [
    (0, [1], True),
    (1, [1], True),
    (2, [1], False),
])
def test_check_minLength(constraint, value, result):
    """check_minLength(constraint, value) matches the expected result."""
    assert constraints.check_minLength(constraint, value) == result
| 23.8
| 67
| 0.735294
| 58
| 476
| 5.655172
| 0.534483
| 0.121951
| 0.195122
| 0.176829
| 0.213415
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017327
| 0.151261
| 476
| 19
| 68
| 25.052632
| 0.794554
| 0.056723
| 0
| 0
| 0
| 0
| 0.056054
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 1
| 0.076923
| false
| 0
| 0.461538
| 0
| 0.538462
| 0.076923
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
517478985ae408d05857add2bf5a7f8926b3712f
| 268
|
py
|
Python
|
ztlearn/datasets/__init__.py
|
jefkine/zeta-learn
|
04388f90093b52f5df2f334c898f3a1224f5a13f
|
[
"MIT"
] | 30
|
2018-03-12T19:16:27.000Z
|
2021-12-16T05:32:38.000Z
|
ztlearn/datasets/__init__.py
|
jefkine/zeta-learn
|
04388f90093b52f5df2f334c898f3a1224f5a13f
|
[
"MIT"
] | 4
|
2018-06-13T03:47:15.000Z
|
2018-11-05T21:33:34.000Z
|
ztlearn/datasets/__init__.py
|
jefkine/zeta-learn
|
04388f90093b52f5df2f334c898f3a1224f5a13f
|
[
"MIT"
] | 4
|
2018-04-30T07:42:47.000Z
|
2022-01-31T11:35:53.000Z
|
# -*- coding: utf-8 -*-
# import module(s)
from . import pima
from . import iris
from . import mnist
from . import cifar
from . import digits
from . import boston
from . import fashion
from . import data_set
# import from data_utils.py
from .data_set import DataSet
| 17.866667
| 29
| 0.727612
| 41
| 268
| 4.682927
| 0.463415
| 0.416667
| 0.135417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004587
| 0.186567
| 268
| 14
| 30
| 19.142857
| 0.876147
| 0.238806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
518975b5241f59261c929bcb2590e0efbd096746
| 94
|
py
|
Python
|
NicBot/errors.py
|
nicdgonzalez/NicBot-discord.py
|
3a21510c1e4e2c933f48708478ae792159324a7c
|
[
"MIT"
] | null | null | null |
NicBot/errors.py
|
nicdgonzalez/NicBot-discord.py
|
3a21510c1e4e2c933f48708478ae792159324a7c
|
[
"MIT"
] | null | null | null |
NicBot/errors.py
|
nicdgonzalez/NicBot-discord.py
|
3a21510c1e4e2c933f48708478ae792159324a7c
|
[
"MIT"
] | null | null | null |
class NicBotException(Exception):
    """Base class for all NicBot-specific errors."""
class UpdateNewFile(NicBotException):
    """Raised when a new file must be fetched/updated (NicBot-specific)."""
| 10.444444
| 37
| 0.744681
| 8
| 94
| 8.75
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191489
| 94
| 8
| 38
| 11.75
| 0.921053
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
51a8565b769000f11b09672e6d238626a93895dc
| 61,477
|
py
|
Python
|
generated-libraries/python/netapp/fpolicy/__init__.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | 2
|
2017-03-28T15:31:26.000Z
|
2018-08-16T22:15:18.000Z
|
generated-libraries/python/netapp/fpolicy/__init__.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
generated-libraries/python/netapp/fpolicy/__init__.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
from netapp.connection import NaConnection
from extension_list_info import ExtensionListInfo # 1 properties
from event_name import EventName # 0 properties
from fpolicy_policy_get_iter_key_td import FpolicyPolicyGetIterKeyTd # 2 properties
from monitored_operation_info import MonitoredOperationInfo # 1 properties
from fpolicy_event_options_config import FpolicyEventOptionsConfig # 6 properties
from secondary_server_info import SecondaryServerInfo # 1 properties
from fpolicy_policy_event_get_iter_key_td import FpolicyPolicyEventGetIterKeyTd # 2 properties
from fpolicy_proto import FpolicyProto # 0 properties
from fpolicy_policy_status_info import FpolicyPolicyStatusInfo # 4 properties
from fpolicy_volumes_list_info import FpolicyVolumesListInfo # 1 properties
from fpolicy_filter import FpolicyFilter # 0 properties
from fpolicy_policy_info import FpolicyPolicyInfo # 7 properties
from engine_name import EngineName # 0 properties
from policy_info import PolicyInfo # 10 properties
from fpolicy_policy_external_engine_get_iter_key_td import FpolicyPolicyExternalEngineGetIterKeyTd # 2 properties
from fpolicy_external_engine_info import FpolicyExternalEngineInfo # 17 properties
from fpolicy_server_status_info import FpolicyServerStatusInfo # 9 properties
from fpolicy_operation import FpolicyOperation # 0 properties
from server_info import ServerInfo # 11 properties
from fpolicy_server_type import FpolicyServerType # 0 properties
from fpolicy_policy_status_get_iter_key_td import FpolicyPolicyStatusGetIterKeyTd # 2 properties
from common_name import CommonName # 0 properties
from fpolicy_ssl_opts import FpolicySslOpts # 0 properties
from fpolicy_scope_config import FpolicyScopeConfig # 11 properties
from fpolicy_server_status_get_iter_key_td import FpolicyServerStatusGetIterKeyTd # 4 properties
from monitored_protocol_info import MonitoredProtocolInfo # 1 properties
from fpolicy_policy_scope_get_iter_key_td import FpolicyPolicyScopeGetIterKeyTd # 2 properties
from fpolicy_server_status import FpolicyServerStatus # 0 properties
from external_engine_type import ExternalEngineType # 0 properties
class FpolicyConnection(NaConnection):
    def fpolicy_server_disconnect(self, node, policy_name, server):
        """
        Terminate connection to FPolicy server
        :param node: Cluster node name.
        :param policy_name: Name of the policy.
        :param server: FPolicy server.
        :return: result of self.request for the "fpolicy-server-disconnect" API call.
        """
        # First dict maps python args to typed API input elements; the
        # second (empty) dict declares expected typed output elements.
        return self.request( "fpolicy-server-disconnect", {
            'node': [ node, 'node', [ basestring, 'None' ], False ],
            'policy_name': [ policy_name, 'policy-name', [ basestring, 'policy-name' ], False ],
            'server': [ server, 'server', [ basestring, 'ip-address' ], False ],
        }, {
        } )
    def fpolicy_volume_list_set(self, policy_name, list_type, volumes):
        """
        Manipulate a list of volumes in an exclude or include set.
        This limits the set of volumes for which client requests
        trigger (include) or suppress (exclude) fpolicy processing
        for the provided policy.
        The list provided will replace the list currently in place,
        if any. Note that if a policy has both an exclude list and
        an include list, the include list is ignored by the filer.
        :param policy_name: Name of the policy.
        :param list_type: Defines to which set (exclude or include) a list
        will be applied.
        Possible values: "exclude", "include".
        :param volumes: List of volume specifications.
        :return: result of self.request for the "fpolicy-volume-list-set" API call.
        """
        return self.request( "fpolicy-volume-list-set", {
            'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
            'list_type': [ list_type, 'list-type', [ basestring, 'None' ], False ],
            'volumes': [ volumes, 'volumes', [ FpolicyVolumesListInfo, 'None' ], True ],
        }, {
        } )
    def fpolicy_set_required(self, policy_name, required):
        """
        Sets policy's "required" option to on/off.
        :param policy_name: Name of the policy.
        :param required: Indicator if the policy is required. If set to true,
        the request will fail if there is no server to evaluate it.
        If it's false, the request will succeed.
        :return: result of self.request for the "fpolicy-set-required" API call.
        """
        return self.request( "fpolicy-set-required", {
            'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
            'required': [ required, 'required', [ bool, 'None' ], False ],
        }, {
        } )
    def fpolicy_enable(self):
        """
        Sets options fpolicy enable to on.
        :return: result of self.request for the "fpolicy-enable" API call.
        """
        # No input parameters and no declared output elements.
        return self.request( "fpolicy-enable", {
        }, {
        } )
    def fpolicy_server_stop(self, server_ip, policy_name):
        """
        Stops specific primary server serving the policy.
        Effectively, this will unregister the fpolicy server.
        :param server_ip: The ip address, in dotted-decimal format, of the server.
        :param policy_name: Name of the policy.
        :return: result of self.request for the "fpolicy-server-stop" API call.
        """
        return self.request( "fpolicy-server-stop", {
            'server_ip': [ server_ip, 'server-ip', [ basestring, 'ip-address' ], False ],
            'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
        }, {
        } )
    def fpolicy_server_connect(self, node, policy_name, server):
        """
        Make a connection to FPolicy server
        :param node: Cluster node name.
        :param policy_name: Name of the policy.
        :param server: FPolicy server.
        :return: result of self.request for the "fpolicy-server-connect" API call.
        """
        return self.request( "fpolicy-server-connect", {
            'node': [ node, 'node', [ basestring, 'None' ], False ],
            'policy_name': [ policy_name, 'policy-name', [ basestring, 'policy-name' ], False ],
            'server': [ server, 'server', [ basestring, 'ip-address' ], False ],
        }, {
        } )
    def fpolicy_get_required_info(self, policy_name):
        """
        Shows current options for the policy.
        :param policy_name: Name of the policy.
        :return: response containing the boolean 'is-required' output element.
        """
        return self.request( "fpolicy-get-required-info", {
            'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
        }, {
            'is-required': [ bool, False ],
        } )
    def fpolicy_disable_policy(self, policy_name):
        """
        Disables a specific named policy.
        :param policy_name: Name of the policy.
        :return: result of self.request for the "fpolicy-disable-policy" API call.
        """
        return self.request( "fpolicy-disable-policy", {
            'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
        }, {
        } )
    def fpolicy_enable_policy(self, policy_name, sequence_number):
        """
        Enables a specific named policy. The operation will fail
        if the policy doesn't exist.
        :param policy_name: Name of the policy.
        :param sequence_number: Policy Sequence Number
        :return: result of self.request for the "fpolicy-enable-policy" API call.
        """
        return self.request( "fpolicy-enable-policy", {
            'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
            'sequence_number': [ sequence_number, 'sequence-number', [ int, 'None' ], False ],
        }, {
        } )
    def fpolicy_policy_modify(self, policy_name, engine_name=None, privileged_user_name=None, events=None, is_mandatory=None, allow_privileged_access=None):
        """
        Modify a policy.
        :param policy_name: Name of the policy.
        :param engine_name: Name of the Engine. Default Engine is 'native'.
        :param privileged_user_name: User name for privileged access. No default value is set for this
        attribute.
        :param events: Events for file access monitoring.
        :param is_mandatory: Indicator if the screening with this policy is required, i.e. it
        will fail if no servers are able process the notification
        registered as a part of external engine. If set to true, the
        request will fail if there is no server to evaluate it. If it's
        false, the request will succeed. Default value is true.
        :param allow_privileged_access: Indicator if privileged access should be given to FPolicy servers
        registered for the policy. Default Value is no.
        :return: result of self.request for the "fpolicy-policy-modify" API call.
        """
        # Optional args left as None are still passed through; presumably
        # self.request omits None-valued elements — confirm in NaConnection.
        return self.request( "fpolicy-policy-modify", {
            'engine_name': [ engine_name, 'engine-name', [ basestring, 'engine-name' ], False ],
            'privileged_user_name': [ privileged_user_name, 'privileged-user-name', [ basestring, 'None' ], False ],
            'policy_name': [ policy_name, 'policy-name', [ basestring, 'policy-name' ], False ],
            'events': [ events, 'events', [ basestring, 'event-name' ], True ],
            'is_mandatory': [ is_mandatory, 'is-mandatory', [ bool, 'None' ], False ],
            'allow_privileged_access': [ allow_privileged_access, 'allow-privileged-access', [ bool, 'None' ], False ],
        }, {
        } )
    def fpolicy_policy_create(self, engine_name, policy_name, events, privileged_user_name=None, return_record=None, is_mandatory=None, allow_privileged_access=None):
        """
        Create a policy.
        :param engine_name: Name of the Engine. Default Engine is 'native'.
        :param policy_name: Name of the policy.
        :param events: Events for file access monitoring.
        :param privileged_user_name: User name for privileged access. No default value is set for this
        attribute.
        :param return_record: If set to true, returns the fpolicy-policy on successful
        creation.
        Default: false
        :param is_mandatory: Indicator if the screening with this policy is required, i.e. it
        will fail if no servers are able process the notification
        registered as a part of external engine. If set to true, the
        request will fail if there is no server to evaluate it. If it's
        false, the request will succeed. Default value is true.
        :param allow_privileged_access: Indicator if privileged access should be given to FPolicy servers
        registered for the policy. Default Value is no.
        :return: response whose 'result' output element is an FpolicyPolicyInfo.
        """
        return self.request( "fpolicy-policy-create", {
            'engine_name': [ engine_name, 'engine-name', [ basestring, 'engine-name' ], False ],
            'privileged_user_name': [ privileged_user_name, 'privileged-user-name', [ basestring, 'None' ], False ],
            'policy_name': [ policy_name, 'policy-name', [ basestring, 'policy-name' ], False ],
            'return_record': [ return_record, 'return-record', [ bool, 'None' ], False ],
            'events': [ events, 'events', [ basestring, 'event-name' ], True ],
            'is_mandatory': [ is_mandatory, 'is-mandatory', [ bool, 'None' ], False ],
            'allow_privileged_access': [ allow_privileged_access, 'allow-privileged-access', [ bool, 'None' ], False ],
        }, {
            'result': [ FpolicyPolicyInfo, False ],
        } )
    def fpolicy_policy_event_modify(self, event_name, volume_operation=None, protocol=None, file_operations=None, filter_string=None):
        """
        Set FPolicy event options. FPolicy event is consist of protocol,
        file operation, volume operation and f
        ilters.
        :param event_name: Name of the Event.
        :param volume_operation: Indicator if the volume operation required for the event.Default
        Value is false.
        :param protocol: Name of protocol for which event is created. By default no
        protocol is selected.
        Possible values:
        <ul>
        <li> "cifs" - CIFS protocol,
        <li> "nfsv3" - NFSv3 protocol,
        <li> "nfsv4" - NFSv4 protocol
        </ul>
        :param file_operations: Name of file operations. By default no operations are monitored.
        Possible values:
        <ul>
        <li> "close" - File close operation,
        <li> "create" - File create operation,
        <li> "create_dir" - File create directory operation,
        <li> "delete" - File delete operation,
        <li> "delete_dir" - Directory delete operation,
        <li> "getattr" - Get attribute operation,
        <li> "link" - Link operation,
        <li> "lookup" - Lookup operation,
        <li> "open" - File open operation,
        <li> "read" - File read operation,
        <li> "write" - File write operation,
        <li> "rename" - File rename operation,
        <li> "rename_dir" - Directory rename operation,
        <li> "setattr" - Set attribute operation,
        <li> "symlink" - Symbolic link operation
        </ul>
        :param filter_string: Name of filters. It is notification filtering parameters. By
        default no filters are selected.
        Possible values:
        <ul>
        <li> "monitor_ads" - Monitor alternate data
        stream,
        <li> "close_with_modification" - Filter close with
        modification,
        <li> "close_without_modification" - Filter close without
        modification,
        <li> "first_read" - Filter first read,
        <li> "first_write" - Filter first write,
        <li> "offline_bit" - Filter offline bit set,
        <li> "open_with_delete_intent" - Filter open with delete
        intent,
        <li> "open_with_write_intent" - Filter open with write
        intent,
        <li> "write_with_size_change" - Filter write with size
        change
        </ul>
        :return: result of self.request for the "fpolicy-policy-event-modify" API call.
        """
        return self.request( "fpolicy-policy-event-modify", {
            'volume_operation': [ volume_operation, 'volume-operation', [ bool, 'None' ], False ],
            'protocol': [ protocol, 'protocol', [ basestring, 'fpolicy-proto' ], False ],
            'file_operations': [ file_operations, 'file-operations', [ basestring, 'fpolicy-operation' ], True ],
            'event_name': [ event_name, 'event-name', [ basestring, 'event-name' ], False ],
            'filter_string': [ filter_string, 'filter-string', [ basestring, 'fpolicy-filter' ], True ],
        }, {
        } )
    def fpolicy_operations_list_set(self, monitored_operations, policy_name, monitored_protocols, force=None, offline_only=None):
        """
        Manipulate a list of operations and network protocols
        for a policy.
        This determines which user requests cause the filer to
        notify fpolicy servers for this policy.
        The list provided will replace the list currently in place,
        if any. Note that this can be confusing to a server which has
        already connected to a policy and provided a list of
        operations. For example, it may have requested notifications
        when users open files, but start receiving notifications
        when users create symlinks.
        This API is provided in support of "native file blocking"
        in which there is no server connected to the filer for a
        policy.
        Note that it is possible to get the list of operations and
        protocols currently set for a policy with the
        fpolicy-list-info API.
        :param monitored_operations: List of operations related values.
        :param policy_name: Name of the policy.
        :param monitored_protocols: List of protocol related values.
        :param force: If a server is connected to the filer and has already
        set the list of operations, should this API override
        the server's setting? If "force" is "true", the policy's
        set of operations will be dropped and replaced with the
        values provided by this API.
        Default value is false.
        :param offline_only: Sets the state of offline filtering. If offline filtering
        is set, then only user requests for files which are marked
        "offline" cause notifications.
        Default value is false.
        :return: result of self.request for the "fpolicy-operations-list-set" API call.
        """
        return self.request( "fpolicy-operations-list-set", {
            'monitored_operations': [ monitored_operations, 'monitored-operations', [ MonitoredOperationInfo, 'None' ], True ],
            'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
            'force': [ force, 'force', [ bool, 'None' ], False ],
            'offline_only': [ offline_only, 'offline-only', [ bool, 'None' ], False ],
            'monitored_protocols': [ monitored_protocols, 'monitored-protocols', [ MonitoredProtocolInfo, 'None' ], True ],
        }, {
        } )
    def fpolicy_volume_list_info(self, policy_name):
        """
        Returns a volume-regular-expression list for an exclude
        or include set.
        The list describes limits to the set of volumes for which
        client requests trigger (include) or suppress (exclude)
        fpolicy processing for the provided policy.
        :param policy_name: Name of the policy.
        :return: response with 'include-volumes' and 'exclude-volumes' lists.
        """
        return self.request( "fpolicy-volume-list-info", {
            'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
        }, {
            'include-volumes': [ FpolicyVolumesListInfo, True ],
            'exclude-volumes': [ FpolicyVolumesListInfo, True ],
        } )
    def fpolicy_policy_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
        """
        Returns information about policies.
        :param max_records: The maximum number of records to return in this call.
        Default: 20
        :param query: A query that specifies which objects to return.
        A query could be specified on any number of attributes in the
        fpolicy-policy object.
        All fpolicy-policy objects matching this query up to
        'max-records' will be returned.
        :param tag: Specify the tag from the last call.
        It is usually not specified for the first call. For subsequent
        calls, copy values from the 'next-tag' obtained from the previous
        call.
        :param desired_attributes: Specify the attributes that should be returned.
        If not present, all attributes for which information is available
        will be returned.
        If present, only the desired attributes for which information is
        available will be returned.
        :return: response whose 'attributes-list' holds FpolicyPolicyInfo records.
        """
        # Note: max_records/tag are passed bare (no element spec), unlike the
        # other arguments — the generated marshalling treats them specially.
        return self.request( "fpolicy-policy-get-iter", {
            'max_records': max_records,
            'query': [ query, 'query', [ FpolicyPolicyInfo, 'None' ], False ],
            'tag': tag,
            'desired_attributes': [ desired_attributes, 'desired-attributes', [ FpolicyPolicyInfo, 'None' ], False ],
        }, {
            'attributes-list': [ FpolicyPolicyInfo, True ],
        } )
    def fpolicy_policy_event_delete(self, event_name):
        """
        Delete FPolicy event.
        :param event_name: Name of the Event.
        :return: result of self.request for the "fpolicy-policy-event-delete" API call.
        """
        return self.request( "fpolicy-policy-event-delete", {
            'event_name': [ event_name, 'event-name', [ basestring, 'event-name' ], False ],
        }, {
        } )
    def fpolicy_extensions_list_info(self, policy_name):
        """
        Returns information on existing extension sets.
        :param policy_name: Name of the policy.
        :return: response with 'exclude-extensions' and 'include-extensions' lists.
        """
        return self.request( "fpolicy-extensions-list-info", {
            'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
        }, {
            'exclude-extensions': [ ExtensionListInfo, True ],
            'include-extensions': [ ExtensionListInfo, True ],
        } )
    def fpolicy_policy_external_engine_create(self, engine_name, port_number, primary_servers, ssl_option, certificate_serial=None, server_progress_timeout=None, secondary_servers=None, certificate_ca=None, request_cancel_timeout=None, return_record=None, certificate_common_name=None, keep_alive_interval=None, extern_engine_type=None, max_connection_retries=None, request_abort_timeout=None, max_server_requests=None, status_request_interval=None):
        """
        Create an external engine.
        :param engine_name: Name of the external engine.
        :param port_number: Port number of the FPolicy server application.
        :param primary_servers: Primary FPolicy servers.
        :param ssl_option: SSL option for external communication. No default value is set
        for this field.
        Possible values:
        <ul>
        <li> "no_auth" - Communication over TCP,
        <li> "server_auth" - Authentication of FPolicy server only,
        <li> "mutual_auth" - Mutual authentication of storage system
        and FPolicy server
        </ul>
        :param certificate_serial: Serial number of certificate. No default value is set for this
        field.
        :param server_progress_timeout: Timeout in seconds in which a throttled FPolicy server must
        complete at least one screen request. If no request is processed
        within the timeout, connection to FPolicy server is terminated.
        Default value set for this field is 60 seconds.
        :param secondary_servers: Secondary FPolicy servers. No default value is set for this
        field.
        :param certificate_ca: Certificate authority name. No default value is set for this
        field.
        :param request_cancel_timeout: Timeout in seconds for a screen request to be processed by an
        FPolicy server. Default value set for this field is 20 seconds.
        :param return_record: If set to true, returns the fpolicy-policy-external-engine on
        successful creation.
        Default: false
        :param certificate_common_name: FQDN or custom common name of certificate. No default value is
        set for this field.
        :param keep_alive_interval: Interval time in seconds for storage appliance to send keep-alive
        message to FPolicy server. Default value set for this field is 10
        seconds.
        :param extern_engine_type: External engine type. If the engine is asynchronous, no reply is
        sent from FPolicy servers. Default value set for this field is
        synchronous.
        Possible values:
        <ul>
        <li> "synchronous" - Synchronous External Engine,
        <li> "asynchronous" - Asynchronous External Engine
        </ul>
        :param max_connection_retries: Number of times storage appliance will attempt to establish a
        broken connection to FPolicy server. Default value set for this
        field is 5.
        :param request_abort_timeout: Timeout in seconds for a screen request to be aborted by storage
        appliance. Default value set for this field is 40 seconds.
        :param max_server_requests: Maximum number of outstanding screen requests that will be queued
        for an FPolicy Server. Default value set for this field is 50.
        :param status_request_interval: Interval time in seconds for storage appliance to query status
        request from FPolicy server. Default value set for this field is
        10 seconds.
        :return: response whose 'result' output element is an FpolicyExternalEngineInfo.
        """
        return self.request( "fpolicy-policy-external-engine-create", {
            'engine_name': [ engine_name, 'engine-name', [ basestring, 'engine-name' ], False ],
            'certificate_serial': [ certificate_serial, 'certificate-serial', [ basestring, 'None' ], False ],
            'server_progress_timeout': [ server_progress_timeout, 'server-progress-timeout', [ int, 'None' ], False ],
            'secondary_servers': [ secondary_servers, 'secondary-servers', [ basestring, 'ip-address' ], True ],
            'certificate_ca': [ certificate_ca, 'certificate-ca', [ basestring, 'None' ], False ],
            'request_cancel_timeout': [ request_cancel_timeout, 'request-cancel-timeout', [ int, 'None' ], False ],
            'port_number': [ port_number, 'port-number', [ int, 'None' ], False ],
            'return_record': [ return_record, 'return-record', [ bool, 'None' ], False ],
            'certificate_common_name': [ certificate_common_name, 'certificate-common-name', [ basestring, 'common-name' ], False ],
            'keep_alive_interval': [ keep_alive_interval, 'keep-alive-interval', [ int, 'None' ], False ],
            'primary_servers': [ primary_servers, 'primary-servers', [ basestring, 'ip-address' ], True ],
            'extern_engine_type': [ extern_engine_type, 'extern-engine-type', [ basestring, 'external-engine-type' ], False ],
            'max_connection_retries': [ max_connection_retries, 'max-connection-retries', [ int, 'None' ], False ],
            'request_abort_timeout': [ request_abort_timeout, 'request-abort-timeout', [ int, 'None' ], False ],
            'ssl_option': [ ssl_option, 'ssl-option', [ basestring, 'fpolicy-ssl-opts' ], False ],
            'max_server_requests': [ max_server_requests, 'max-server-requests', [ int, 'None' ], False ],
            'status_request_interval': [ status_request_interval, 'status-request-interval', [ int, 'None' ], False ],
        }, {
            'result': [ FpolicyExternalEngineInfo, False ],
        } )
def fpolicy_policy_status_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
    """
    Return FPolicy policy status information, one page per call.

    :param max_records: Maximum number of records to return. Default: 20.
    :param query: Query restricting which fpolicy-policy-status objects
        are returned; it may match on any attribute of the object. All
        matching objects, up to 'max-records', are returned.
    :param tag: Continuation tag. Usually omitted on the first call; for
        subsequent calls, pass the 'next-tag' value from the previous
        response.
    :param desired_attributes: Attributes to include in the response. If
        omitted, every attribute for which information is available is
        returned.
    """
    # Each child spec is [python value, ZAPI element name,
    # [element type, subtype], is-list flag]; plain values pass through.
    request_spec = {
        'max_records': max_records,
        'query': [ query, 'query', [ FpolicyPolicyStatusInfo, 'None' ], False ],
        'tag': tag,
        'desired_attributes': [ desired_attributes, 'desired-attributes', [ FpolicyPolicyStatusInfo, 'None' ], False ],
    }
    response_spec = {
        'attributes-list': [ FpolicyPolicyStatusInfo, True ],
    }
    return self.request( "fpolicy-policy-status-get-iter", request_spec, response_spec )
def fpolicy_policy_delete(self, policy_name):
    """
    Delete an FPolicy policy.

    :param policy_name: Name of the policy to delete.
    """
    # Single required child element; no typed output elements.
    request_spec = {
        'policy_name': [ policy_name, 'policy-name', [ basestring, 'policy-name' ], False ],
    }
    return self.request( "fpolicy-policy-delete", request_spec, { } )
def fpolicy_set_policy_options(self, policy_name, reqcancel_timeout=None, is_required=None, is_ads_monitored=None, secondary_servers=None, serverprogress_timeout=None, is_cifs_disconnect_check_enabled=None, is_cifs_setattr_enabled=None):
    """
    Set a policy's options on or off.

    :param policy_name: Name of the policy.
    :param reqcancel_timeout: Timeout (in seconds) for a screen request
        to be processed by an FPolicy server. Range: [0..4294967].
    :param is_required: Whether screening with this policy is required,
        i.e. whether requests fail when no server is registered. If
        true, a request fails when no server can evaluate it; if false,
        the request succeeds anyway. Default: false.
    :param is_ads_monitored: Whether the policy monitors CIFS operations
        on Alternate Data Streams. Default: false.
    :param secondary_servers: List of server IP addresses; servers
        registering from these addresses are treated as secondary.
    :param serverprogress_timeout: Timeout (in seconds) within which a
        throttled FPolicy server must complete at least one screen
        request. Range: [0..4294967].
    :param is_cifs_disconnect_check_enabled: 'true' if requests tied to
        disconnected CIFS sessions must not be screened, 'false'
        otherwise.
    :param is_cifs_setattr_enabled: Whether cifs-setattr support is
        enabled for this policy; if true, cifs setattr operations are
        screened. Default: false.
    """
    # Optional children marshal to nothing when their value is None.
    request_spec = {
        'reqcancel_timeout': [ reqcancel_timeout, 'reqcancel-timeout', [ int, 'None' ], False ],
        'is_required': [ is_required, 'is-required', [ bool, 'None' ], False ],
        'is_ads_monitored': [ is_ads_monitored, 'is-ads-monitored', [ bool, 'None' ], False ],
        'secondary_servers': [ secondary_servers, 'secondary-servers', [ SecondaryServerInfo, 'None' ], True ],
        'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
        'serverprogress_timeout': [ serverprogress_timeout, 'serverprogress-timeout', [ int, 'None' ], False ],
        'is_cifs_disconnect_check_enabled': [ is_cifs_disconnect_check_enabled, 'is-cifs-disconnect-check-enabled', [ bool, 'None' ], False ],
        'is_cifs_setattr_enabled': [ is_cifs_setattr_enabled, 'is-cifs-setattr-enabled', [ bool, 'None' ], False ],
    }
    return self.request( "fpolicy-set-policy-options", request_spec, { } )
def fpolicy_set_secondary_servers(self, secondary_servers, policy_name):
    """
    Set secondary server information as a list of IP addresses.

    These servers are used when no primary server is available, thereby
    increasing system availability.

    :param secondary_servers: List of servers' IP addresses.
    :param policy_name: Name of the policy.
    """
    request_spec = {
        'secondary_servers': [ secondary_servers, 'secondary-servers', [ SecondaryServerInfo, 'None' ], True ],
        'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
    }
    return self.request( "fpolicy-set-secondary-servers", request_spec, { } )
def fpolicy_server_status_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
    """
    Return FPolicy server status information, one page per call.

    :param max_records: Maximum number of records to return. Default: 20.
    :param query: Query restricting which fpolicy-server-status objects
        are returned; it may match on any attribute of the object. All
        matching objects, up to 'max-records', are returned.
    :param tag: Continuation tag. Usually omitted on the first call; for
        subsequent calls, pass the 'next-tag' value from the previous
        response.
    :param desired_attributes: Attributes to include in the response. If
        omitted, every attribute for which information is available is
        returned.
    """
    request_spec = {
        'max_records': max_records,
        'query': [ query, 'query', [ FpolicyServerStatusInfo, 'None' ], False ],
        'tag': tag,
        'desired_attributes': [ desired_attributes, 'desired-attributes', [ FpolicyServerStatusInfo, 'None' ], False ],
    }
    response_spec = {
        'attributes-list': [ FpolicyServerStatusInfo, True ],
    }
    return self.request( "fpolicy-server-status-get-iter", request_spec, response_spec )
def fpolicy_policy_external_engine_modify(self, engine_name, certificate_serial=None, server_progress_timeout=None, secondary_servers=None, certificate_ca=None, request_cancel_timeout=None, port_number=None, certificate_common_name=None, keep_alive_interval=None, primary_servers=None, extern_engine_type=None, max_connection_retries=None, request_abort_timeout=None, ssl_option=None, max_server_requests=None, status_request_interval=None):
    """
    Modify an external engine.

    An external engine can be modified only while no enabled policy is
    using it.

    :param engine_name: Name of the external engine.
    :param certificate_serial: Certificate serial number. No default.
    :param server_progress_timeout: Timeout (seconds) within which a
        throttled FPolicy server must complete at least one screen
        request; if none is processed in time, the connection to the
        server is terminated. Default: 60 seconds.
    :param secondary_servers: Secondary FPolicy servers. No default.
    :param certificate_ca: Certificate authority name. No default.
    :param request_cancel_timeout: Timeout (seconds) for a screen
        request to be processed by an FPolicy server. Default: 20.
    :param port_number: Port number of the FPolicy server application.
    :param certificate_common_name: FQDN or custom common name of the
        certificate. No default.
    :param keep_alive_interval: Interval (seconds) at which the storage
        appliance sends keep-alive messages to the FPolicy server.
        Default: 10 seconds.
    :param primary_servers: Primary FPolicy servers.
    :param extern_engine_type: External engine type; asynchronous
        engines send no reply from FPolicy servers. Default:
        synchronous. Possible values: "synchronous" (Synchronous
        External Engine), "asynchronous" (Asynchronous External Engine).
    :param max_connection_retries: Number of attempts the storage
        appliance makes to re-establish a broken connection to the
        FPolicy server. Default: 5.
    :param request_abort_timeout: Timeout (seconds) for a screen request
        to be aborted by the storage appliance. Default: 40 seconds.
    :param ssl_option: SSL option for external communication; no
        default. Possible values: "no_auth" (communication over TCP),
        "server_auth" (authentication of FPolicy server only),
        "mutual_auth" (mutual authentication of storage system and
        FPolicy server).
    :param max_server_requests: Maximum number of outstanding screen
        requests queued for an FPolicy server. Default: 50.
    :param status_request_interval: Interval (seconds) at which the
        storage appliance queries status from the FPolicy server.
        Default: 10 seconds.
    """
    # Every child spec is [python value, ZAPI element name,
    # [element type, subtype], is-list flag].
    request_spec = {
        'engine_name': [ engine_name, 'engine-name', [ basestring, 'engine-name' ], False ],
        'certificate_serial': [ certificate_serial, 'certificate-serial', [ basestring, 'None' ], False ],
        'server_progress_timeout': [ server_progress_timeout, 'server-progress-timeout', [ int, 'None' ], False ],
        'secondary_servers': [ secondary_servers, 'secondary-servers', [ basestring, 'ip-address' ], True ],
        'certificate_ca': [ certificate_ca, 'certificate-ca', [ basestring, 'None' ], False ],
        'request_cancel_timeout': [ request_cancel_timeout, 'request-cancel-timeout', [ int, 'None' ], False ],
        'port_number': [ port_number, 'port-number', [ int, 'None' ], False ],
        'certificate_common_name': [ certificate_common_name, 'certificate-common-name', [ basestring, 'common-name' ], False ],
        'keep_alive_interval': [ keep_alive_interval, 'keep-alive-interval', [ int, 'None' ], False ],
        'primary_servers': [ primary_servers, 'primary-servers', [ basestring, 'ip-address' ], True ],
        'extern_engine_type': [ extern_engine_type, 'extern-engine-type', [ basestring, 'external-engine-type' ], False ],
        'max_connection_retries': [ max_connection_retries, 'max-connection-retries', [ int, 'None' ], False ],
        'request_abort_timeout': [ request_abort_timeout, 'request-abort-timeout', [ int, 'None' ], False ],
        'ssl_option': [ ssl_option, 'ssl-option', [ basestring, 'fpolicy-ssl-opts' ], False ],
        'max_server_requests': [ max_server_requests, 'max-server-requests', [ int, 'None' ], False ],
        'status_request_interval': [ status_request_interval, 'status-request-interval', [ int, 'None' ], False ],
    }
    return self.request( "fpolicy-policy-external-engine-modify", request_spec, { } )
def fpolicy_get_secondary_servers_info(self, policy_name):
    """
    Show the current secondary-server options for a policy.

    :param policy_name: Name of the policy.
    """
    request_spec = {
        'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
    }
    response_spec = {
        'secondary-servers': [ SecondaryServerInfo, True ],
    }
    return self.request( "fpolicy-get-secondary-servers-info", request_spec, response_spec )
def fpolicy_disable(self):
    """Turn the 'fpolicy enable' option off."""
    # This API takes no input children and returns no typed elements.
    return self.request( "fpolicy-disable", { }, { } )
def fpolicy_create_policy(self, policy_name, policy_type):
    """
    Create a new policy.

    :param policy_name: Name of the policy.
    :param policy_type: Type of the policy. Possible values: "screen".
    """
    request_spec = {
        'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
        'policy_type': [ policy_type, 'policy-type', [ basestring, 'None' ], False ],
    }
    return self.request( "fpolicy-create-policy", request_spec, { } )
def fpolicy_server_list_info(self, policy_name):
    """
    Show the list of primary servers serving a policy.

    :param policy_name: Name of the policy.
    """
    request_spec = {
        'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
    }
    response_spec = {
        'servers': [ ServerInfo, True ],
    }
    return self.request( "fpolicy-server-list-info", request_spec, response_spec )
def fpolicy_policy_event_create(self, event_name, protocol=None, volume_operation=None, return_record=None, filter_string=None, file_operations=None):
    """
    Create an FPolicy event.

    :param event_name: Name of the event.
    :param protocol: Protocol for which the event is created; none is
        selected by default. Possible values: "cifs" (CIFS protocol),
        "nfsv3" (NFSv3 protocol), "nfsv4" (NFSv4 protocol).
    :param volume_operation: Whether the volume operation is required
        for the event. Default: false.
    :param return_record: If true, return the fpolicy-policy-event on
        successful creation. Default: false.
    :param filter_string: Notification filtering parameters (filter
        names); none are selected by default. Possible values:
        "monitor_ads" (monitor alternate data stream),
        "close_with_modification" (filter close with modification),
        "close_without_modification" (filter close without
        modification), "first_read" (filter first read), "first_write"
        (filter first write), "offline_bit" (filter offline bit set),
        "open_with_delete_intent" (filter open with delete intent),
        "open_with_write_intent" (filter open with write intent),
        "write_with_size_change" (filter write with size change).
    :param file_operations: File operations to monitor; none are
        monitored by default. Possible values: "close", "create",
        "create_dir", "delete", "delete_dir", "getattr", "link",
        "lookup", "open", "read", "write", "rename", "rename_dir",
        "setattr", "symlink".
    """
    # filter-string and file-operations marshal as repeated elements.
    request_spec = {
        'protocol': [ protocol, 'protocol', [ basestring, 'fpolicy-proto' ], False ],
        'volume_operation': [ volume_operation, 'volume-operation', [ bool, 'None' ], False ],
        'return_record': [ return_record, 'return-record', [ bool, 'None' ], False ],
        'event_name': [ event_name, 'event-name', [ basestring, 'event-name' ], False ],
        'filter_string': [ filter_string, 'filter-string', [ basestring, 'fpolicy-filter' ], True ],
        'file_operations': [ file_operations, 'file-operations', [ basestring, 'fpolicy-operation' ], True ],
    }
    response_spec = {
        'result': [ FpolicyEventOptionsConfig, False ],
    }
    return self.request( "fpolicy-policy-event-create", request_spec, response_spec )
def fpolicy_policy_scope_create(self, policy_name, export_policies_to_include=None, volumes_to_exclude=None, file_extensions_to_exclude=None, export_policies_to_exclude=None, check_extensions_on_directories=None, return_record=None, volumes_to_include=None, shares_to_exclude=None, file_extensions_to_include=None, shares_to_include=None):
    """
    Set FPolicy scope options.

    An FPolicy scope consists of shares, volumes, export policies and
    file extensions.

    :param policy_name: Name of the policy.
    :param export_policies_to_include: Export policies to include for
        file access monitoring; none are selected by default.
    :param volumes_to_exclude: Volumes inactive for the file policy; the
        list may contain regular expressions such as 'vol*' or 'user?'.
        If a policy has both an exclude and an include list, the filer
        ignores the include list when processing user requests. No
        volume is selected by default.
    :param file_extensions_to_exclude: File extensions excluded from
        screening; none are selected by default.
    :param export_policies_to_exclude: Export policies to exclude from
        file access monitoring; none are selected by default.
    :param check_extensions_on_directories: Whether directory names are
        also subjected to the extensions check, like file names.
        Default: false.
    :param return_record: If true, return the fpolicy-policy-scope on
        successful creation. Default: false.
    :param volumes_to_include: Volumes active for the file policy; the
        list may contain regular expressions such as 'vol*' or 'user?'.
        No volume is selected by default.
    :param shares_to_exclude: Shares to exclude from file access
        monitoring; none are selected by default.
    :param file_extensions_to_include: File extensions included for
        screening; none are selected by default.
    :param shares_to_include: Shares to include for file access
        monitoring; none are selected by default.
    """
    request_spec = {
        'export_policies_to_include': [ export_policies_to_include, 'export-policies-to-include', [ basestring, 'None' ], True ],
        'volumes_to_exclude': [ volumes_to_exclude, 'volumes-to-exclude', [ basestring, 'None' ], True ],
        'file_extensions_to_exclude': [ file_extensions_to_exclude, 'file-extensions-to-exclude', [ basestring, 'None' ], True ],
        'policy_name': [ policy_name, 'policy-name', [ basestring, 'policy-name' ], False ],
        'export_policies_to_exclude': [ export_policies_to_exclude, 'export-policies-to-exclude', [ basestring, 'None' ], True ],
        'check_extensions_on_directories': [ check_extensions_on_directories, 'check-extensions-on-directories', [ bool, 'None' ], False ],
        'return_record': [ return_record, 'return-record', [ bool, 'None' ], False ],
        'volumes_to_include': [ volumes_to_include, 'volumes-to-include', [ basestring, 'None' ], True ],
        'shares_to_exclude': [ shares_to_exclude, 'shares-to-exclude', [ basestring, 'None' ], True ],
        'file_extensions_to_include': [ file_extensions_to_include, 'file-extensions-to-include', [ basestring, 'None' ], True ],
        'shares_to_include': [ shares_to_include, 'shares-to-include', [ basestring, 'None' ], True ],
    }
    response_spec = {
        'result': [ FpolicyScopeConfig, False ],
    }
    return self.request( "fpolicy-policy-scope-create", request_spec, response_spec )
def fpolicy_policy_scope_modify(self, policy_name, export_policies_to_include=None, volumes_to_exclude=None, file_extensions_to_exclude=None, export_policies_to_exclude=None, check_extensions_on_directories=None, volumes_to_include=None, shares_to_exclude=None, file_extensions_to_include=None, shares_to_include=None):
    """
    Modify FPolicy scope options.

    An FPolicy scope consists of shares, volumes, export policies and
    file extensions.

    :param policy_name: Name of the policy.
    :param export_policies_to_include: Export policies to include for
        file access monitoring; none are selected by default.
    :param volumes_to_exclude: Volumes inactive for the file policy; the
        list may contain regular expressions such as 'vol*' or 'user?'.
        If a policy has both an exclude and an include list, the filer
        ignores the include list when processing user requests. No
        volume is selected by default.
    :param file_extensions_to_exclude: File extensions excluded from
        screening; none are selected by default.
    :param export_policies_to_exclude: Export policies to exclude from
        file access monitoring; none are selected by default.
    :param check_extensions_on_directories: Whether directory names are
        also subjected to the extensions check, like file names.
        Default: false.
    :param volumes_to_include: Volumes active for the file policy; the
        list may contain regular expressions such as 'vol*' or 'user?'.
        No volume is selected by default.
    :param shares_to_exclude: Shares to exclude from file access
        monitoring; none are selected by default.
    :param file_extensions_to_include: File extensions included for
        screening; none are selected by default.
    :param shares_to_include: Shares to include for file access
        monitoring; none are selected by default.
    """
    request_spec = {
        'export_policies_to_include': [ export_policies_to_include, 'export-policies-to-include', [ basestring, 'None' ], True ],
        'volumes_to_exclude': [ volumes_to_exclude, 'volumes-to-exclude', [ basestring, 'None' ], True ],
        'file_extensions_to_exclude': [ file_extensions_to_exclude, 'file-extensions-to-exclude', [ basestring, 'None' ], True ],
        'policy_name': [ policy_name, 'policy-name', [ basestring, 'policy-name' ], False ],
        'export_policies_to_exclude': [ export_policies_to_exclude, 'export-policies-to-exclude', [ basestring, 'None' ], True ],
        'check_extensions_on_directories': [ check_extensions_on_directories, 'check-extensions-on-directories', [ bool, 'None' ], False ],
        'volumes_to_include': [ volumes_to_include, 'volumes-to-include', [ basestring, 'None' ], True ],
        'shares_to_exclude': [ shares_to_exclude, 'shares-to-exclude', [ basestring, 'None' ], True ],
        'file_extensions_to_include': [ file_extensions_to_include, 'file-extensions-to-include', [ basestring, 'None' ], True ],
        'shares_to_include': [ shares_to_include, 'shares-to-include', [ basestring, 'None' ], True ],
    }
    return self.request( "fpolicy-policy-scope-modify", request_spec, { } )
def fpolicy_list_info(self, policy_name=None):
    """
    Return the list of existing policies.

    :param policy_name: Name of a policy. If given, the returned
        policies contain information pertaining to that policy only; if
        no such policy exists, the list is empty.
    """
    request_spec = {
        'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
    }
    response_spec = {
        'policies': [ PolicyInfo, True ],
    }
    return self.request( "fpolicy-list-info", request_spec, response_spec )
def fpolicy_policy_scope_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
    """
    Return rows of FPolicy scope options, one page per call.

    An FPolicy scope consists of shares, volumes, export policies and
    file extensions.

    :param max_records: Maximum number of records to return. Default: 20.
    :param query: Query restricting which fpolicy-policy-scope objects
        are returned; it may match on any attribute of the object. All
        matching objects, up to 'max-records', are returned.
    :param tag: Continuation tag. Usually omitted on the first call; for
        subsequent calls, pass the 'next-tag' value from the previous
        response.
    :param desired_attributes: Attributes to include in the response. If
        omitted, every attribute for which information is available is
        returned.
    """
    request_spec = {
        'max_records': max_records,
        'query': [ query, 'query', [ FpolicyScopeConfig, 'None' ], False ],
        'tag': tag,
        'desired_attributes': [ desired_attributes, 'desired-attributes', [ FpolicyScopeConfig, 'None' ], False ],
    }
    response_spec = {
        'attributes-list': [ FpolicyScopeConfig, True ],
    }
    return self.request( "fpolicy-policy-scope-get-iter", request_spec, response_spec )
def fpolicy_destroy_policy(self, policy_name):
    """
    Destroy an existing policy.

    :param policy_name: Name of the policy.
    """
    request_spec = {
        'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
    }
    return self.request( "fpolicy-destroy-policy", request_spec, { } )
def fpolicy_policy_external_engine_delete(self, engine_name):
    """
    Delete an external engine.

    :param engine_name: Name of the external engine.
    """
    request_spec = {
        'engine_name': [ engine_name, 'engine-name', [ basestring, 'engine-name' ], False ],
    }
    return self.request( "fpolicy-policy-external-engine-delete", request_spec, { } )
def fpolicy_status(self):
    """Return the status of the 'fpolicy enable' option."""
    # No input children; the reply carries a single boolean element.
    response_spec = {
        'is-enabled': [ bool, False ],
    }
    return self.request( "fpolicy-status", { }, response_spec )
def fpolicy_policy_scope_delete(self, policy_name):
    """
    Delete a scope.

    :param policy_name: Name of the policy.
    """
    request_spec = {
        'policy_name': [ policy_name, 'policy-name', [ basestring, 'policy-name' ], False ],
    }
    return self.request( "fpolicy-policy-scope-delete", request_spec, { } )
def fpolicy_extensions(self, policy_name, set_name, command, extensions=None):
    """
    Manipulate the list of extensions in the exclude or include set.

    The exclude set defines extension patterns that do not trigger
    FPolicy processing.

    :param policy_name: Name of the policy.
    :param set_name: Set ("exclude" or "include") to which the command
        (add, remove, etc.) is applied. For instance, command = add and
        set-name = include adds the given list of extensions to the
        include set. Possible values: "exclude", "include".
    :param command: Command to apply to the specified set. Supported
        values: "add", "remove", "set", "reset".
    :param extensions: List of extensions; required when the command is
        "add", "set" or "remove".
    """
    request_spec = {
        'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
        'set_name': [ set_name, 'set-name', [ basestring, 'None' ], False ],
        'command': [ command, 'command', [ basestring, 'None' ], False ],
        'extensions': [ extensions, 'extensions', [ ExtensionListInfo, 'None' ], True ],
    }
    return self.request( "fpolicy-extensions", request_spec, { } )
def fpolicy_policy_external_engine_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
    """
    Return information on external engines, one page per call.

    :param max_records: Maximum number of records to return. Default: 20.
    :param query: Query restricting which fpolicy-policy-external-engine
        objects are returned; it may match on any attribute of the
        object. All matching objects, up to 'max-records', are returned.
    :param tag: Continuation tag. Usually omitted on the first call; for
        subsequent calls, pass the 'next-tag' value from the previous
        response.
    :param desired_attributes: Attributes to include in the response. If
        omitted, every attribute for which information is available is
        returned.
    """
    request_spec = {
        'max_records': max_records,
        'query': [ query, 'query', [ FpolicyExternalEngineInfo, 'None' ], False ],
        'tag': tag,
        'desired_attributes': [ desired_attributes, 'desired-attributes', [ FpolicyExternalEngineInfo, 'None' ], False ],
    }
    response_spec = {
        'attributes-list': [ FpolicyExternalEngineInfo, True ],
    }
    return self.request( "fpolicy-policy-external-engine-get-iter", request_spec, response_spec )
def fpolicy_get_policy_options(self, policy_name):
    """
    Show the values of a policy's options.

    :param policy_name: Name of the policy.
    """
    request_spec = {
        'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],
    }
    # Output spec: element name -> [element type, is-list flag].
    response_spec = {
        'reqcancel-timeout': [ int, False ],
        'is-required': [ bool, False ],
        'is-ads-monitored': [ bool, False ],
        'secondary-servers': [ SecondaryServerInfo, True ],
        'serverprogress-timeout': [ int, False ],
        'is-cifs-disconnect-check-enabled': [ bool, False ],
        'is-cifs-setattr-enabled': [ bool, False ],
    }
    return self.request( "fpolicy-get-policy-options", request_spec, response_spec )
def fpolicy_policy_event_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
    """
    Return rows of FPolicy event options, one page per call.

    An FPolicy event consists of a protocol, file operations, a volume
    operation, and filters.

    :param max_records: Maximum number of records to return. Default: 20.
    :param query: Query restricting which fpolicy-policy-event objects
        are returned; it may match on any attribute of the object. All
        matching objects, up to 'max-records', are returned.
    :param tag: Continuation tag. Usually omitted on the first call; for
        subsequent calls, pass the 'next-tag' value from the previous
        response.
    :param desired_attributes: Attributes to include in the response. If
        omitted, every attribute for which information is available is
        returned.
    """
    request_spec = {
        'max_records': max_records,
        'query': [ query, 'query', [ FpolicyEventOptionsConfig, 'None' ], False ],
        'tag': tag,
        'desired_attributes': [ desired_attributes, 'desired-attributes', [ FpolicyEventOptionsConfig, 'None' ], False ],
    }
    response_spec = {
        'attributes-list': [ FpolicyEventOptionsConfig, True ],
    }
    return self.request( "fpolicy-policy-event-get-iter", request_spec, response_spec )
| 51.748316
| 450
| 0.606422
| 6,729
| 61,477
| 5.39679
| 0.065983
| 0.038001
| 0.023351
| 0.028638
| 0.783726
| 0.740906
| 0.716261
| 0.707173
| 0.69346
| 0.668842
| 0
| 0.002232
| 0.30753
| 61,477
| 1,187
| 451
| 51.791912
| 0.850814
| 0.465995
| 0
| 0.553763
| 0
| 0
| 0.242488
| 0.086806
| 0
| 0
| 0
| 0
| 0
| 1
| 0.110215
| false
| 0
| 0.080645
| 0
| 0.303763
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
51d82a4a78317f4eced5c0075b91640e53d92d65
| 64
|
py
|
Python
|
create_browser.py
|
bannedcoder/selenium-easy-debug
|
657c7dea3df4c661198e5461abc95a5abaa9ab30
|
[
"MIT"
] | null | null | null |
create_browser.py
|
bannedcoder/selenium-easy-debug
|
657c7dea3df4c661198e5461abc95a5abaa9ab30
|
[
"MIT"
] | null | null | null |
create_browser.py
|
bannedcoder/selenium-easy-debug
|
657c7dea3df4c661198e5461abc95a5abaa9ab30
|
[
"MIT"
] | null | null | null |
from helpers.reusable_browser import *
create_driver_session()
| 16
| 38
| 0.84375
| 8
| 64
| 6.375
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 64
| 3
| 39
| 21.333333
| 0.87931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
51eaff77f11bb6623d586c575371ad21566bce38
| 857
|
py
|
Python
|
prosperpy/overlays/bollinger_bands.py
|
CaptainBriot/prosperpy
|
831abb9c9e3b730c81895647e33a59854c4e4648
|
[
"MIT"
] | 2
|
2018-01-28T06:11:37.000Z
|
2018-02-04T16:01:30.000Z
|
prosperpy/overlays/bollinger_bands.py
|
CaptainBriot/prosperpy
|
831abb9c9e3b730c81895647e33a59854c4e4648
|
[
"MIT"
] | 1
|
2018-03-20T12:10:40.000Z
|
2018-03-21T00:08:04.000Z
|
prosperpy/overlays/bollinger_bands.py
|
CaptainBriot/prosperpy
|
831abb9c9e3b730c81895647e33a59854c4e4648
|
[
"MIT"
] | 2
|
2019-04-06T14:33:26.000Z
|
2020-06-25T23:34:32.000Z
|
from . import moving_average
from .. import indicators
class BollingerBands:
def __init__(self, values, multiplier=2, moving_average_class=moving_average.SimpleMovingAverage):
self.moving_average_class = moving_average_class
self.multiplier = multiplier
self.moving_average = self.moving_average_class(values)
self.standard_deviation = indicators.StandardDeviation(values)
def add(self, value):
self.moving_average.add(value)
self.standard_deviation.add(value)
@property
def upper(self):
return self.moving_average.value + (self.standard_deviation.value * self.multiplier)
@property
def lower(self):
return self.moving_average.value - (self.standard_deviation.value * self.multiplier)
@property
def bandwidth(self):
return self.upper - self.lower
| 31.740741
| 102
| 0.72112
| 98
| 857
| 6.081633
| 0.244898
| 0.218121
| 0.171141
| 0.130872
| 0.38255
| 0.278523
| 0.278523
| 0.278523
| 0.278523
| 0.278523
| 0
| 0.001449
| 0.194866
| 857
| 26
| 103
| 32.961538
| 0.862319
| 0
| 0
| 0.15
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.1
| 0.15
| 0.55
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
a4080e0aba5ecd64976ae2135ad806d510892f8d
| 93
|
py
|
Python
|
webapp/MemberApp/apps.py
|
AdaFactor/RamaProject
|
4da35480e2e7185a6d07f00cc8cdbf51898a7bb7
|
[
"MIT"
] | null | null | null |
webapp/MemberApp/apps.py
|
AdaFactor/RamaProject
|
4da35480e2e7185a6d07f00cc8cdbf51898a7bb7
|
[
"MIT"
] | 7
|
2018-02-16T11:18:24.000Z
|
2019-04-23T17:49:04.000Z
|
webapp/MemberApp/apps.py
|
AdaFactor/RamaProject
|
4da35480e2e7185a6d07f00cc8cdbf51898a7bb7
|
[
"MIT"
] | 1
|
2018-01-29T05:15:13.000Z
|
2018-01-29T05:15:13.000Z
|
from django.apps import AppConfig
class MemberappConfig(AppConfig):
name = 'MemberApp'
| 15.5
| 33
| 0.763441
| 10
| 93
| 7.1
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 93
| 5
| 34
| 18.6
| 0.910256
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
cf8d37dd7cc02823d08160694115cee5f64df311
| 255
|
py
|
Python
|
essentials/old_on_352.py
|
sonicrules1234/sonicbot
|
07a22d08bf86ed33dc715a800957aee3b45f3dde
|
[
"BSD-3-Clause"
] | 1
|
2019-06-27T08:45:23.000Z
|
2019-06-27T08:45:23.000Z
|
essentials/old_on_352.py
|
sonicrules1234/sonicbot
|
07a22d08bf86ed33dc715a800957aee3b45f3dde
|
[
"BSD-3-Clause"
] | null | null | null |
essentials/old_on_352.py
|
sonicrules1234/sonicbot
|
07a22d08bf86ed33dc715a800957aee3b45f3dde
|
[
"BSD-3-Clause"
] | null | null | null |
import time
minlevel = 1
arguments = ["self", "info"]
keyword = "352"
def main(self, info) :
self.hostnames[info["words"][7]] = info["words"][5]
self.whoislist[info["words"][7]] = info["words"][7] + "!" + info["words"][4] + "@" + info["words"][5]
| 31.875
| 105
| 0.576471
| 35
| 255
| 4.2
| 0.485714
| 0.367347
| 0.204082
| 0.285714
| 0.326531
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046083
| 0.14902
| 255
| 7
| 106
| 36.428571
| 0.631336
| 0
| 0
| 0
| 0
| 0
| 0.168627
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
cf9459db5ea16ef4050b20b3369b43e59a337980
| 127
|
py
|
Python
|
instaapp/tests.py
|
martinmandina/InstagramApp
|
89ab7b8e0b85d49f220fec3c55327dedea9f1b47
|
[
"MIT"
] | null | null | null |
instaapp/tests.py
|
martinmandina/InstagramApp
|
89ab7b8e0b85d49f220fec3c55327dedea9f1b47
|
[
"MIT"
] | null | null | null |
instaapp/tests.py
|
martinmandina/InstagramApp
|
89ab7b8e0b85d49f220fec3c55327dedea9f1b47
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from .models import Profile,Image,Comments
import datetime as dt
# Create your tests here.
| 15.875
| 42
| 0.795276
| 19
| 127
| 5.315789
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15748
| 127
| 7
| 43
| 18.142857
| 0.943925
| 0.181102
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
cfab7b4d1f8932a304f458648a1dfa8880c9d0fe
| 146
|
py
|
Python
|
tests/utils.py
|
tobricz/Expression
|
3ff020fda97e8d0bc43467d64b4a9f481fc0c386
|
[
"MIT"
] | 135
|
2020-11-30T02:32:32.000Z
|
2022-03-29T14:29:34.000Z
|
tests/utils.py
|
tobricz/Expression
|
3ff020fda97e8d0bc43467d64b4a9f481fc0c386
|
[
"MIT"
] | 50
|
2020-11-30T02:42:33.000Z
|
2022-03-19T15:39:58.000Z
|
tests/utils.py
|
tobricz/Expression
|
3ff020fda97e8d0bc43467d64b4a9f481fc0c386
|
[
"MIT"
] | 16
|
2020-12-15T17:15:36.000Z
|
2022-03-16T08:38:57.000Z
|
class CustomException(Exception):
def __init__(self, message: str):
self.message = message
def throw(err: Exception):
raise err
| 18.25
| 37
| 0.684932
| 17
| 146
| 5.647059
| 0.647059
| 0.229167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.219178
| 146
| 7
| 38
| 20.857143
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
cfad240cbe2de63e774a45108d8024a830541cc9
| 3,820
|
py
|
Python
|
sha.py
|
Jfghanimah/fpga-miner
|
fcf8fb4a7eed941de78b2517046cdcc7305e833d
|
[
"MIT"
] | 1
|
2021-12-19T18:11:25.000Z
|
2021-12-19T18:11:25.000Z
|
sha.py
|
Jfghanimah/fpga-miner
|
fcf8fb4a7eed941de78b2517046cdcc7305e833d
|
[
"MIT"
] | null | null | null |
sha.py
|
Jfghanimah/fpga-miner
|
fcf8fb4a7eed941de78b2517046cdcc7305e833d
|
[
"MIT"
] | null | null | null |
import timeit
W = 32 #Number of bits in word
M = 1 << W
FF = M - 1 #0xFFFFFFFF (for performing addition mod 2**32)
#Constants from SHA256 definition
K_t = (0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2)
#Initial values for compression func
H_t = (0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19)
#Block Padding
padding = (
0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
# 32-bit bitwise rotate right
def RR(x, b):
return ((x >> b) | (x << (W - b))) & FF
# Pads a message and converts to byte array
def Pad(W):
mdi = len(W) % 64
L = (len(W) << 3).to_bytes(8, 'big') #Binary of len(W) in bits
npad = 55 - mdi if mdi < 56 else 119 - mdi #Pad so 64 | len; add 1 block if needed
return bytes(W, 'ascii') + b'\x80' + (b'\x00' * npad) + L #64 | 1 + npad + 8 + len(W)
# Compression Function
def Sha256CF(Wt, Kt, A, B, C, D, E, F, G, H):
Ch = (E & F) ^ (~E & G)
Ma = (A & B) ^ (A & C) ^ (B & C) #Major
S0 = RR(A, 2) ^ RR(A, 13) ^ RR(A, 22) #Sigma_0
S1 = RR(E, 6) ^ RR(E, 11) ^ RR(E, 25) #Sigma_1
T1 = H + S1 + Ch + Wt + Kt
return (T1 + S0 + Ma) & FF, A, B, C, (D + T1) & FF, E, F, G
def Sha256(M):
'''
Performs SHA256 on an input string
M: The string to process
return: A 32 byte array of the binary digest
'''
M = Pad(M) #Pad message so that length is divisible by 64
DG = list(H_t) #Digest as 8 32-bit words (A-H)
for j in range(0, len(M), 64): #Iterate over message in chunks of 64
S = M[j:j + 64] #Current chunk
W = [0] * 64
W[0:16] = [int.from_bytes(S[i:i + 4], 'big') for i in range(0, 64, 4)]
for i in range(16, 64):
s0 = RR(W[i - 15], 7) ^ RR(W[i - 15], 18) ^ (W[i - 15] >> 3)
s1 = RR(W[i - 2], 17) ^ RR(W[i - 2], 19) ^ (W[i - 2] >> 10)
W[i] = (W[i - 16] + s0 + W[i-7] + s1) & FF
A, B, C, D, E, F, G, H = DG #State of the compression function
for i in range(64):
A, B, C, D, E, F, G, H = Sha256CF(W[i], K_t[i], A, B, C, D, E, F, G, H)
DG = [(X + Y) & FF for X, Y in zip(DG, (A, B, C, D, E, F, G, H))]
return b''.join(Di.to_bytes(4, 'big') for Di in DG) #Convert to byte array
if __name__ == "__main__":
print('\n'*10)
print("Running Benchmark for software\n")
time = timeit.timeit("Sha256('Bitcoin Miner!')", number=10000, globals=globals())
print(f'Python Software Encryption Speed: {10000/time} H/s\n')
while(1):
msg = input("Enter msg:")
bd = Sha256(msg)
print(''.join('{:02x}'.format(i) for i in bd))
| 41.978022
| 97
| 0.585602
| 587
| 3,820
| 3.781942
| 0.388416
| 0.223423
| 0.32973
| 0.432432
| 0.136937
| 0.133333
| 0.133333
| 0.133333
| 0.122523
| 0.113514
| 0
| 0.275151
| 0.263613
| 3,820
| 91
| 98
| 41.978022
| 0.514042
| 0.165183
| 0
| 0.030303
| 0
| 0
| 0.049571
| 0
| 0
| 0
| 0.310137
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.015152
| 0.015152
| 0.136364
| 0.060606
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
cfbdc3d684ec4775e9200fbfec05b45b578aaee8
| 31
|
py
|
Python
|
Constants.py
|
TmanHef/CentipedeRL
|
400d6e71826a79c1a597b057b7543080d3f3c376
|
[
"MIT"
] | 1
|
2020-08-12T02:43:57.000Z
|
2020-08-12T02:43:57.000Z
|
Constants.py
|
TmanHef/CentipedeRL
|
400d6e71826a79c1a597b057b7543080d3f3c376
|
[
"MIT"
] | null | null | null |
Constants.py
|
TmanHef/CentipedeRL
|
400d6e71826a79c1a597b057b7543080d3f3c376
|
[
"MIT"
] | null | null | null |
take_action = 0
pass_action = 1
| 15.5
| 15
| 0.774194
| 6
| 31
| 3.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0.16129
| 31
| 2
| 16
| 15.5
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.5
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
cfe01a8804824c311ce3867e63b5c0e7159d9fc9
| 168
|
py
|
Python
|
lib/app_blocker.py
|
esoadamo/simple-guardian-ad-hoc
|
d30afcff5ee14da10837908b531a0e04524e4c9a
|
[
"MIT"
] | null | null | null |
lib/app_blocker.py
|
esoadamo/simple-guardian-ad-hoc
|
d30afcff5ee14da10837908b531a0e04524e4c9a
|
[
"MIT"
] | null | null | null |
lib/app_blocker.py
|
esoadamo/simple-guardian-ad-hoc
|
d30afcff5ee14da10837908b531a0e04524e4c9a
|
[
"MIT"
] | null | null | null |
class IPBlocker:
def block(self, ip: str) -> bool:
raise NotImplementedError()
def unblock(self, ip: str) -> bool:
raise NotImplementedError()
| 24
| 39
| 0.630952
| 18
| 168
| 5.888889
| 0.611111
| 0.113208
| 0.169811
| 0.245283
| 0.698113
| 0.698113
| 0
| 0
| 0
| 0
| 0
| 0
| 0.255952
| 168
| 6
| 40
| 28
| 0.848
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
cfe927bd7ca7e8f24aafb7b3f14f3e83885d3a2b
| 27
|
py
|
Python
|
src/sc3nb/resources/__init__.py
|
interactive-sonification/sc3nb
|
c6081ae01f4e72094b6cb6dbd9667278c8d21069
|
[
"MIT"
] | 7
|
2021-08-02T12:57:13.000Z
|
2022-02-16T08:54:23.000Z
|
src/sc3nb/resources/__init__.py
|
thomas-hermann/sc3nb
|
7d7fbd9178fe804c5c8ddd0ddd4075579221b7c4
|
[
"MIT"
] | 3
|
2019-08-09T17:56:18.000Z
|
2020-10-24T13:05:47.000Z
|
src/sc3nb/resources/__init__.py
|
thomas-hermann/sc3nb
|
7d7fbd9178fe804c5c8ddd0ddd4075579221b7c4
|
[
"MIT"
] | 6
|
2019-04-18T17:25:42.000Z
|
2020-04-28T09:43:33.000Z
|
"""Module for resources"""
| 13.5
| 26
| 0.666667
| 3
| 27
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 27
| 1
| 27
| 27
| 0.75
| 0.740741
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
cff23a15ff1214959fa2e3d85c5cc6d66b104339
| 42
|
py
|
Python
|
dvc/repo/install.py
|
kaiogu/dvc
|
ffa8fe5888dbbb3d37b3874562f99fd77d4bbcb7
|
[
"Apache-2.0"
] | 3
|
2020-01-31T05:33:14.000Z
|
2021-05-20T08:19:25.000Z
|
dvc/repo/install.py
|
kaiogu/dvc
|
ffa8fe5888dbbb3d37b3874562f99fd77d4bbcb7
|
[
"Apache-2.0"
] | null | null | null |
dvc/repo/install.py
|
kaiogu/dvc
|
ffa8fe5888dbbb3d37b3874562f99fd77d4bbcb7
|
[
"Apache-2.0"
] | 1
|
2019-09-02T00:29:40.000Z
|
2019-09-02T00:29:40.000Z
|
def install(self):
self.scm.install()
| 14
| 22
| 0.666667
| 6
| 42
| 4.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 42
| 2
| 23
| 21
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
cff9d5239b3700f0d9618adc535bbe162bd56f31
| 65
|
py
|
Python
|
setup.py
|
rioyokotalab/asdfghjkl
|
f435c1e2527162fb07512b4ce5058460aab238b9
|
[
"MIT"
] | null | null | null |
setup.py
|
rioyokotalab/asdfghjkl
|
f435c1e2527162fb07512b4ce5058460aab238b9
|
[
"MIT"
] | null | null | null |
setup.py
|
rioyokotalab/asdfghjkl
|
f435c1e2527162fb07512b4ce5058460aab238b9
|
[
"MIT"
] | null | null | null |
import setuptools
if __name__ == "main":
setuptools.setup()
| 13
| 22
| 0.692308
| 7
| 65
| 5.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184615
| 65
| 4
| 23
| 16.25
| 0.773585
| 0
| 0
| 0
| 0
| 0
| 0.061538
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
320512f0c69af9c1065d29be90296020700656ad
| 141
|
py
|
Python
|
testing/pkg1/patchingmod_main.py
|
mxr/pymonkey
|
f2e55590c7064019e928eddc41dad5d288722ce6
|
[
"MIT"
] | 4
|
2018-04-02T18:10:02.000Z
|
2019-09-28T21:43:56.000Z
|
testing/pkg1/patchingmod_main.py
|
mxr/pymonkey
|
f2e55590c7064019e928eddc41dad5d288722ce6
|
[
"MIT"
] | 4
|
2016-05-05T02:08:59.000Z
|
2017-08-15T01:33:19.000Z
|
testing/pkg1/patchingmod_main.py
|
mxr/pymonkey
|
f2e55590c7064019e928eddc41dad5d288722ce6
|
[
"MIT"
] | 1
|
2018-04-09T15:14:33.000Z
|
2018-04-09T15:14:33.000Z
|
from pymonkey import make_entry_point
main = make_entry_point(('patchingmod',), 'targetmod')
if __name__ == '__main__':
exit(main())
| 15.666667
| 54
| 0.70922
| 17
| 141
| 5.176471
| 0.705882
| 0.204545
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 141
| 8
| 55
| 17.625
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0.198582
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
320ed9f71383693ed5738f144db75650764b8b50
| 79
|
py
|
Python
|
test_custom_user_subclass/__init__.py
|
montebond/pyp
|
9a0356f8ade44fd813e03d54f5b934336c6b6656
|
[
"BSD-3-Clause"
] | 238
|
2015-01-05T16:51:00.000Z
|
2022-03-31T16:06:19.000Z
|
test_custom_user_subclass/__init__.py
|
montebond/pyp
|
9a0356f8ade44fd813e03d54f5b934336c6b6656
|
[
"BSD-3-Clause"
] | 32
|
2015-02-20T18:10:15.000Z
|
2022-03-29T18:54:04.000Z
|
test_custom_user_subclass/__init__.py
|
montebond/pyp
|
9a0356f8ade44fd813e03d54f5b934336c6b6656
|
[
"BSD-3-Clause"
] | 72
|
2015-01-05T16:56:24.000Z
|
2022-02-27T17:34:48.000Z
|
default_app_config = 'test_custom_user_subclass.apps.CustomUserSubclassConfig'
| 39.5
| 78
| 0.898734
| 9
| 79
| 7.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037975
| 79
| 1
| 79
| 79
| 0.868421
| 0
| 0
| 0
| 0
| 0
| 0.696203
| 0.696203
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
320ee976739f42fedf893794dfa5031aa4b5791f
| 1,488
|
py
|
Python
|
app/models.py
|
pianomanfrazier/simple-blog
|
e3e1058c83726ec49273323625fcc3e721618d3d
|
[
"MIT"
] | null | null | null |
app/models.py
|
pianomanfrazier/simple-blog
|
e3e1058c83726ec49273323625fcc3e721618d3d
|
[
"MIT"
] | null | null | null |
app/models.py
|
pianomanfrazier/simple-blog
|
e3e1058c83726ec49273323625fcc3e721618d3d
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from app import db
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(64))
class Category(db.Model):
category = db.Column(db.String(32), primary_key=True, unique=True)
class Tag(db.Model):
tag = db.Column(db.String(32), primary_key=True, unique=True)
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(64), unique=True)
slug = db.Column(db.String(32), unique=True)
content = db.Column(db.String(1000))
category = db.Column(db.String(32), db.ForeignKey(Category.category))
pub_date = db.Column(db.Date)
last_updated = db.Column(db.DateTime, default=datetime.utcnow)
draft = db.Column(db.Boolean)
class Comment(db.Model):
id = db.Column(db.Integer, primary_key=True)
post_id = db.Column(db.Integer, db.ForeignKey(Post.id))
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
comment = db.Column(db.String(256))
timestamp = db.Column(db.DateTime, default=datetime.utcnow)
approved = db.Column(db.Boolean)
class Media(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(32))
filepath = db.Column(db.String(16))
mime = db.Column(db.String(16))
size = db.Column(db.Integer)
| 38.153846
| 75
| 0.645161
| 216
| 1,488
| 4.398148
| 0.217593
| 0.202105
| 0.252632
| 0.202105
| 0.629474
| 0.450526
| 0.414737
| 0.267368
| 0.267368
| 0.267368
| 0
| 0.023018
| 0.211694
| 1,488
| 38
| 76
| 39.157895
| 0.786871
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
321b20f9d6d0aa3b0ffc1a6053b57cbad71ac3d1
| 40
|
py
|
Python
|
lspy/__init__.py
|
liushilive/PythonTutor
|
967cdc842fcfef7477110889e2d947116a2448c7
|
[
"MIT"
] | null | null | null |
lspy/__init__.py
|
liushilive/PythonTutor
|
967cdc842fcfef7477110889e2d947116a2448c7
|
[
"MIT"
] | null | null | null |
lspy/__init__.py
|
liushilive/PythonTutor
|
967cdc842fcfef7477110889e2d947116a2448c7
|
[
"MIT"
] | 3
|
2020-08-30T06:00:06.000Z
|
2022-01-06T12:36:43.000Z
|
__version__ = "0.0.4"
__author__ = "刘士"
| 13.333333
| 21
| 0.65
| 6
| 40
| 3
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 0.15
| 40
| 2
| 22
| 20
| 0.441176
| 0
| 0
| 0
| 0
| 0
| 0.175
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
5c5070cb18878ecd7f37417f5ff3abcf765afa7a
| 222
|
py
|
Python
|
tests/template_backends/test_django.py
|
arachnegl/django
|
c4e796aa1bab70ee66151d24a061af811e08ad08
|
[
"BSD-3-Clause"
] | 1
|
2019-01-14T10:58:43.000Z
|
2019-01-14T10:58:43.000Z
|
tests/template_backends/test_django.py
|
arachnegl/django
|
c4e796aa1bab70ee66151d24a061af811e08ad08
|
[
"BSD-3-Clause"
] | null | null | null |
tests/template_backends/test_django.py
|
arachnegl/django
|
c4e796aa1bab70ee66151d24a061af811e08ad08
|
[
"BSD-3-Clause"
] | null | null | null |
from django.template.backends.django import DjangoTemplates
from .test_dummy import TemplateStringsTests
class DjangoTemplatesTests(TemplateStringsTests):
engine_class = DjangoTemplates
backend_name = 'django'
| 22.2
| 59
| 0.824324
| 21
| 222
| 8.571429
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126126
| 222
| 9
| 60
| 24.666667
| 0.927835
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
5c63298d5f399fa1ac8a31fab042f54823289e28
| 16,267
|
py
|
Python
|
RI/flask_server/tapi_server/models/tapi_connectivity_resilience_constraint.py
|
arthurMll/TAPI
|
e1171bb139c6791a953af09cfc2bc7ad928da73d
|
[
"Apache-2.0"
] | 57
|
2018-04-09T08:56:18.000Z
|
2022-03-23T08:31:06.000Z
|
RI/flask_server/tapi_server/models/tapi_connectivity_resilience_constraint.py
|
arthurMll/TAPI
|
e1171bb139c6791a953af09cfc2bc7ad928da73d
|
[
"Apache-2.0"
] | 143
|
2016-06-08T04:09:54.000Z
|
2018-02-23T10:45:59.000Z
|
RI/flask_server/tapi_server/models/tapi_connectivity_resilience_constraint.py
|
arthurMll/TAPI
|
e1171bb139c6791a953af09cfc2bc7ad928da73d
|
[
"Apache-2.0"
] | 64
|
2018-03-07T07:55:17.000Z
|
2022-03-28T07:14:28.000Z
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_common_layer_protocol_name import TapiCommonLayerProtocolName # noqa: F401,E501
from tapi_server.models.tapi_connectivity_coordinate_type import TapiConnectivityCoordinateType # noqa: F401,E501
from tapi_server.models.tapi_connectivity_reversion_mode import TapiConnectivityReversionMode # noqa: F401,E501
from tapi_server.models.tapi_topology_resilience_type import TapiTopologyResilienceType # noqa: F401,E501
from tapi_server import util
class TapiConnectivityResilienceConstraint(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, is_lock_out=False, max_switch_times=None, restoration_coordinate_type=None, is_coordinated_switching_both_ends=False, hold_off_time=None, is_frozen=False, wait_to_revert_time=15, resilience_type=None, preferred_restoration_layer=None, restore_priority=None, reversion_mode=None): # noqa: E501
"""TapiConnectivityResilienceConstraint - a model defined in OpenAPI
:param is_lock_out: The is_lock_out of this TapiConnectivityResilienceConstraint. # noqa: E501
:type is_lock_out: bool
:param max_switch_times: The max_switch_times of this TapiConnectivityResilienceConstraint. # noqa: E501
:type max_switch_times: int
:param restoration_coordinate_type: The restoration_coordinate_type of this TapiConnectivityResilienceConstraint. # noqa: E501
:type restoration_coordinate_type: TapiConnectivityCoordinateType
:param is_coordinated_switching_both_ends: The is_coordinated_switching_both_ends of this TapiConnectivityResilienceConstraint. # noqa: E501
:type is_coordinated_switching_both_ends: bool
:param hold_off_time: The hold_off_time of this TapiConnectivityResilienceConstraint. # noqa: E501
:type hold_off_time: int
:param is_frozen: The is_frozen of this TapiConnectivityResilienceConstraint. # noqa: E501
:type is_frozen: bool
:param wait_to_revert_time: The wait_to_revert_time of this TapiConnectivityResilienceConstraint. # noqa: E501
:type wait_to_revert_time: int
:param resilience_type: The resilience_type of this TapiConnectivityResilienceConstraint. # noqa: E501
:type resilience_type: TapiTopologyResilienceType
:param preferred_restoration_layer: The preferred_restoration_layer of this TapiConnectivityResilienceConstraint. # noqa: E501
:type preferred_restoration_layer: List[TapiCommonLayerProtocolName]
:param restore_priority: The restore_priority of this TapiConnectivityResilienceConstraint. # noqa: E501
:type restore_priority: int
:param reversion_mode: The reversion_mode of this TapiConnectivityResilienceConstraint. # noqa: E501
:type reversion_mode: TapiConnectivityReversionMode
"""
self.openapi_types = {
'is_lock_out': bool,
'max_switch_times': int,
'restoration_coordinate_type': TapiConnectivityCoordinateType,
'is_coordinated_switching_both_ends': bool,
'hold_off_time': int,
'is_frozen': bool,
'wait_to_revert_time': int,
'resilience_type': TapiTopologyResilienceType,
'preferred_restoration_layer': List[TapiCommonLayerProtocolName],
'restore_priority': int,
'reversion_mode': TapiConnectivityReversionMode
}
self.attribute_map = {
'is_lock_out': 'is-lock-out',
'max_switch_times': 'max-switch-times',
'restoration_coordinate_type': 'restoration-coordinate-type',
'is_coordinated_switching_both_ends': 'is-coordinated-switching-both-ends',
'hold_off_time': 'hold-off-time',
'is_frozen': 'is-frozen',
'wait_to_revert_time': 'wait-to-revert-time',
'resilience_type': 'resilience-type',
'preferred_restoration_layer': 'preferred-restoration-layer',
'restore_priority': 'restore-priority',
'reversion_mode': 'reversion-mode'
}
self._is_lock_out = is_lock_out
self._max_switch_times = max_switch_times
self._restoration_coordinate_type = restoration_coordinate_type
self._is_coordinated_switching_both_ends = is_coordinated_switching_both_ends
self._hold_off_time = hold_off_time
self._is_frozen = is_frozen
self._wait_to_revert_time = wait_to_revert_time
self._resilience_type = resilience_type
self._preferred_restoration_layer = preferred_restoration_layer
self._restore_priority = restore_priority
self._reversion_mode = reversion_mode
@classmethod
def from_dict(cls, dikt) -> 'TapiConnectivityResilienceConstraint':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.connectivity.ResilienceConstraint of this TapiConnectivityResilienceConstraint. # noqa: E501
:rtype: TapiConnectivityResilienceConstraint
"""
return util.deserialize_model(dikt, cls)
@property
def is_lock_out(self):
"""Gets the is_lock_out of this TapiConnectivityResilienceConstraint.
The resource is configured to temporarily not be available for use in the protection scheme(s) it is part of. This overrides all other protection control states including forced. If the item is locked out then it cannot be used under any circumstances. Note: Only relevant when part of a protection scheme. # noqa: E501
:return: The is_lock_out of this TapiConnectivityResilienceConstraint.
:rtype: bool
"""
return self._is_lock_out
@is_lock_out.setter
def is_lock_out(self, is_lock_out):
"""Sets the is_lock_out of this TapiConnectivityResilienceConstraint.
The resource is configured to temporarily not be available for use in the protection scheme(s) it is part of. This overrides all other protection control states including forced. If the item is locked out then it cannot be used under any circumstances. Note: Only relevant when part of a protection scheme. # noqa: E501
:param is_lock_out: The is_lock_out of this TapiConnectivityResilienceConstraint.
:type is_lock_out: bool
"""
self._is_lock_out = is_lock_out
@property
def max_switch_times(self):
"""Gets the max_switch_times of this TapiConnectivityResilienceConstraint.
Used to limit the maximum swtich times. When work fault disappears , and traffic return to the original work path, switch counter reset. # noqa: E501
:return: The max_switch_times of this TapiConnectivityResilienceConstraint.
:rtype: int
"""
return self._max_switch_times
@max_switch_times.setter
def max_switch_times(self, max_switch_times):
"""Sets the max_switch_times of this TapiConnectivityResilienceConstraint.
Used to limit the maximum swtich times. When work fault disappears , and traffic return to the original work path, switch counter reset. # noqa: E501
:param max_switch_times: The max_switch_times of this TapiConnectivityResilienceConstraint.
:type max_switch_times: int
"""
self._max_switch_times = max_switch_times
@property
def restoration_coordinate_type(self):
"""Gets the restoration_coordinate_type of this TapiConnectivityResilienceConstraint.
:return: The restoration_coordinate_type of this TapiConnectivityResilienceConstraint.
:rtype: TapiConnectivityCoordinateType
"""
return self._restoration_coordinate_type
@restoration_coordinate_type.setter
def restoration_coordinate_type(self, restoration_coordinate_type):
"""Sets the restoration_coordinate_type of this TapiConnectivityResilienceConstraint.
:param restoration_coordinate_type: The restoration_coordinate_type of this TapiConnectivityResilienceConstraint.
:type restoration_coordinate_type: TapiConnectivityCoordinateType
"""
self._restoration_coordinate_type = restoration_coordinate_type
@property
def is_coordinated_switching_both_ends(self):
"""Gets the is_coordinated_switching_both_ends of this TapiConnectivityResilienceConstraint.
Is operating such that switching at both ends of each flow acorss the FC is coordinated at both ingress and egress ends. # noqa: E501
:return: The is_coordinated_switching_both_ends of this TapiConnectivityResilienceConstraint.
:rtype: bool
"""
return self._is_coordinated_switching_both_ends
@is_coordinated_switching_both_ends.setter
def is_coordinated_switching_both_ends(self, is_coordinated_switching_both_ends):
"""Sets the is_coordinated_switching_both_ends of this TapiConnectivityResilienceConstraint.
Is operating such that switching at both ends of each flow acorss the FC is coordinated at both ingress and egress ends. # noqa: E501
:param is_coordinated_switching_both_ends: The is_coordinated_switching_both_ends of this TapiConnectivityResilienceConstraint.
:type is_coordinated_switching_both_ends: bool
"""
self._is_coordinated_switching_both_ends = is_coordinated_switching_both_ends
@property
def hold_off_time(self):
"""Gets the hold_off_time of this TapiConnectivityResilienceConstraint.
This attribute indicates the time, in milliseconds, between declaration of signal degrade or signal fail, and the initialization of the protection switching algorithm. # noqa: E501
:return: The hold_off_time of this TapiConnectivityResilienceConstraint.
:rtype: int
"""
return self._hold_off_time
@hold_off_time.setter
def hold_off_time(self, hold_off_time):
"""Sets the hold_off_time of this TapiConnectivityResilienceConstraint.
This attribute indicates the time, in milliseconds, between declaration of signal degrade or signal fail, and the initialization of the protection switching algorithm. # noqa: E501
:param hold_off_time: The hold_off_time of this TapiConnectivityResilienceConstraint.
:type hold_off_time: int
"""
self._hold_off_time = hold_off_time
@property
def is_frozen(self):
"""Gets the is_frozen of this TapiConnectivityResilienceConstraint.
Temporarily prevents any switch action to be taken and, as such, freezes the current state. Until the freeze is cleared, additional near-end external commands are rejected and fault condition changes and received APS messages are ignored. All administrative controls of any aspect of protection are rejected. # noqa: E501
:return: The is_frozen of this TapiConnectivityResilienceConstraint.
:rtype: bool
"""
return self._is_frozen
@is_frozen.setter
def is_frozen(self, is_frozen):
"""Sets the is_frozen of this TapiConnectivityResilienceConstraint.
Temporarily prevents any switch action to be taken and, as such, freezes the current state. Until the freeze is cleared, additional near-end external commands are rejected and fault condition changes and received APS messages are ignored. All administrative controls of any aspect of protection are rejected. # noqa: E501
:param is_frozen: The is_frozen of this TapiConnectivityResilienceConstraint.
:type is_frozen: bool
"""
self._is_frozen = is_frozen
@property
def wait_to_revert_time(self):
"""Gets the wait_to_revert_time of this TapiConnectivityResilienceConstraint.
If the protection system is revertive, this attribute specifies the time, in minutes, to wait after a fault clears on a higher priority (preferred) resource before reverting to the preferred resource. # noqa: E501
:return: The wait_to_revert_time of this TapiConnectivityResilienceConstraint.
:rtype: int
"""
return self._wait_to_revert_time
@wait_to_revert_time.setter
def wait_to_revert_time(self, wait_to_revert_time):
"""Sets the wait_to_revert_time of this TapiConnectivityResilienceConstraint.
If the protection system is revertive, this attribute specifies the time, in minutes, to wait after a fault clears on a higher priority (preferred) resource before reverting to the preferred resource. # noqa: E501
:param wait_to_revert_time: The wait_to_revert_time of this TapiConnectivityResilienceConstraint.
:type wait_to_revert_time: int
"""
self._wait_to_revert_time = wait_to_revert_time
@property
def resilience_type(self):
"""Gets the resilience_type of this TapiConnectivityResilienceConstraint.
:return: The resilience_type of this TapiConnectivityResilienceConstraint.
:rtype: TapiTopologyResilienceType
"""
return self._resilience_type
@resilience_type.setter
def resilience_type(self, resilience_type):
"""Sets the resilience_type of this TapiConnectivityResilienceConstraint.
:param resilience_type: The resilience_type of this TapiConnectivityResilienceConstraint.
:type resilience_type: TapiTopologyResilienceType
"""
self._resilience_type = resilience_type
@property
def preferred_restoration_layer(self):
"""Gets the preferred_restoration_layer of this TapiConnectivityResilienceConstraint.
Indicate which layer this resilience parameters package configured for. # noqa: E501
:return: The preferred_restoration_layer of this TapiConnectivityResilienceConstraint.
:rtype: List[TapiCommonLayerProtocolName]
"""
return self._preferred_restoration_layer
@preferred_restoration_layer.setter
def preferred_restoration_layer(self, preferred_restoration_layer):
"""Sets the preferred_restoration_layer of this TapiConnectivityResilienceConstraint.
Indicate which layer this resilience parameters package configured for. # noqa: E501
:param preferred_restoration_layer: The preferred_restoration_layer of this TapiConnectivityResilienceConstraint.
:type preferred_restoration_layer: List[TapiCommonLayerProtocolName]
"""
self._preferred_restoration_layer = preferred_restoration_layer
@property
def restore_priority(self):
    """Gets the restore_priority of this TapiConnectivityResilienceConstraint.

    none  # noqa: E501

    :return: The restore_priority of this TapiConnectivityResilienceConstraint.
    :rtype: int
    """
    return self._restore_priority


@restore_priority.setter
def restore_priority(self, restore_priority):
    """Sets the restore_priority of this TapiConnectivityResilienceConstraint.

    none  # noqa: E501

    :param restore_priority: The restore_priority of this TapiConnectivityResilienceConstraint.
    :type restore_priority: int
    """
    self._restore_priority = restore_priority
@property
def reversion_mode(self):
    """Gets the reversion_mode of this TapiConnectivityResilienceConstraint.

    :return: The reversion_mode of this TapiConnectivityResilienceConstraint.
    :rtype: TapiConnectivityReversionMode
    """
    return self._reversion_mode


@reversion_mode.setter
def reversion_mode(self, reversion_mode):
    """Sets the reversion_mode of this TapiConnectivityResilienceConstraint.

    :param reversion_mode: The reversion_mode of this TapiConnectivityResilienceConstraint.
    :type reversion_mode: TapiConnectivityReversionMode
    """
    self._reversion_mode = reversion_mode
| 47.150725
| 391
| 0.733018
| 1,813
| 16,267
| 6.296746
| 0.112521
| 0.030484
| 0.206027
| 0.050105
| 0.8072
| 0.70918
| 0.630168
| 0.523038
| 0.492554
| 0.402943
| 0
| 0.009387
| 0.214114
| 16,267
| 344
| 392
| 47.287791
| 0.883605
| 0.58634
| 0
| 0.282051
| 0
| 0
| 0.112441
| 0.052789
| 0
| 0
| 0
| 0
| 0
| 1
| 0.205128
| false
| 0
| 0.076923
| 0
| 0.393162
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
5c75c79e7b141cc80ac12483d8bd267f8a675134
| 110
|
py
|
Python
|
Lampi/main.py
|
Ooyekunle/dummytestFeb172020
|
fbf0f966d38a90af4fede4c1deef341bf9f7d8c3
|
[
"RSA-MD"
] | null | null | null |
Lampi/main.py
|
Ooyekunle/dummytestFeb172020
|
fbf0f966d38a90af4fede4c1deef341bf9f7d8c3
|
[
"RSA-MD"
] | null | null | null |
Lampi/main.py
|
Ooyekunle/dummytestFeb172020
|
fbf0f966d38a90af4fede4c1deef341bf9f7d8c3
|
[
"RSA-MD"
] | null | null | null |
#!/usr/bin/env python3
from lampi.lampi_app import LampiApp


def main() -> None:
    """Create the LampiApp and start its event loop."""
    LampiApp().run()


if __name__ == "__main__":
    main()
| 15.714286
| 36
| 0.7
| 15
| 110
| 4.533333
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010753
| 0.154545
| 110
| 6
| 37
| 18.333333
| 0.72043
| 0.190909
| 0
| 0
| 0
| 0
| 0.090909
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
5c7f2b4d05c98efc722f2031521e7206c2e38a4e
| 86
|
py
|
Python
|
microgrid/__init__.py
|
bcornelusse/microgrid-bench
|
3f9a8a99f12b580c81f52bb146a5f47f08a032c0
|
[
"BSD-2-Clause"
] | 10
|
2017-10-11T11:50:30.000Z
|
2021-08-02T20:44:42.000Z
|
microgrid/__init__.py
|
bcornelusse/microgrid-bench
|
3f9a8a99f12b580c81f52bb146a5f47f08a032c0
|
[
"BSD-2-Clause"
] | null | null | null |
microgrid/__init__.py
|
bcornelusse/microgrid-bench
|
3f9a8a99f12b580c81f52bb146a5f47f08a032c0
|
[
"BSD-2-Clause"
] | 1
|
2017-10-11T11:49:52.000Z
|
2017-10-11T11:49:52.000Z
|
"""
The microgrid package organizes the test-bench functionalities in subpackages.
"""
| 28.666667
| 78
| 0.790698
| 10
| 86
| 6.8
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 86
| 3
| 79
| 28.666667
| 0.894737
| 0.906977
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
5c7f7deeb931e035995b8a071503bf7f94cb090c
| 89
|
py
|
Python
|
epsilon/apps.py
|
atlednolispe/blog
|
d3926e424d544f3e9a3805b16a15072ac6c6a780
|
[
"MIT"
] | null | null | null |
epsilon/apps.py
|
atlednolispe/blog
|
d3926e424d544f3e9a3805b16a15072ac6c6a780
|
[
"MIT"
] | 7
|
2020-06-05T16:37:46.000Z
|
2022-03-11T23:11:36.000Z
|
epsilon/apps.py
|
atlednolispe/blog
|
d3926e424d544f3e9a3805b16a15072ac6c6a780
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig


class EpsilonConfig(AppConfig):
    """Django application configuration for the ``epsilon`` app."""

    name = 'epsilon'
| 14.833333
| 33
| 0.752809
| 10
| 89
| 6.7
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168539
| 89
| 5
| 34
| 17.8
| 0.905405
| 0
| 0
| 0
| 0
| 0
| 0.078652
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
7a31027b5ed0e4096f71764a61c58585d6898d31
| 116
|
py
|
Python
|
kigo/bpmn/elements/definitions.py
|
AsyncMicroStack/kigo-bpmn
|
dfe8c312399f86067393973a0c28a8695bb5e07a
|
[
"Apache-2.0"
] | null | null | null |
kigo/bpmn/elements/definitions.py
|
AsyncMicroStack/kigo-bpmn
|
dfe8c312399f86067393973a0c28a8695bb5e07a
|
[
"Apache-2.0"
] | null | null | null |
kigo/bpmn/elements/definitions.py
|
AsyncMicroStack/kigo-bpmn
|
dfe8c312399f86067393973a0c28a8695bb5e07a
|
[
"Apache-2.0"
] | null | null | null |
from kigo.bpmn.elements.element import Element


class BpmnDefinitions(Element):
    """Element wrapper for the BPMN ``bpmn:definitions`` root tag."""

    item_name = "bpmn:definitions"
| 19.333333
| 46
| 0.784483
| 14
| 116
| 6.428571
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12931
| 116
| 5
| 47
| 23.2
| 0.891089
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
7a397491b82c42c5a64d723d577f9c2c7718bf3a
| 187
|
py
|
Python
|
testPins.py
|
brammieman1/MazeSolver
|
22d5a91e95fe88aecd4d4b1f75218d628d2e5fe4
|
[
"MIT"
] | null | null | null |
testPins.py
|
brammieman1/MazeSolver
|
22d5a91e95fe88aecd4d4b1f75218d628d2e5fe4
|
[
"MIT"
] | null | null | null |
testPins.py
|
brammieman1/MazeSolver
|
22d5a91e95fe88aecd4d4b1f75218d628d2e5fe4
|
[
"MIT"
] | null | null | null |
import wiringpi2 as wiringpi
import time

# BCM pin exercised by this smoke test and how long it is held high.
_PIN = 17
_HOLD_SECONDS = 4

wiringpi.wiringPiSetupGpio()
wiringpi.pinMode(_PIN, 1)        # configure as output
wiringpi.digitalWrite(_PIN, 1)   # drive high
time.sleep(_HOLD_SECONDS)
wiringpi.digitalWrite(_PIN, 0)   # drive low again
wiringpi.pinMode(_PIN, 0)        # restore pin to input mode
| 18.7
| 28
| 0.812834
| 27
| 187
| 5.62963
| 0.481481
| 0.197368
| 0.223684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08046
| 0.069519
| 187
| 9
| 29
| 20.777778
| 0.793103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
7a60e3f7182adc1bb57a1b4a891e3d337828847a
| 676
|
py
|
Python
|
Proj1/test.py
|
sebemery/EE559-DeepLearning-MiniProjects
|
34ea114884e2e4f1416d63fa53466619e6a5c5fd
|
[
"MIT"
] | null | null | null |
Proj1/test.py
|
sebemery/EE559-DeepLearning-MiniProjects
|
34ea114884e2e4f1416d63fa53466619e6a5c5fd
|
[
"MIT"
] | null | null | null |
Proj1/test.py
|
sebemery/EE559-DeepLearning-MiniProjects
|
34ea114884e2e4f1416d63fa53466619e6a5c5fd
|
[
"MIT"
] | 1
|
2020-03-18T18:58:02.000Z
|
2020-03-18T18:58:02.000Z
|
# Evaluate the LeNet-with-weight-sharing-and-auxiliary-loss model over ten seeds.
from models.Nets import *
from models.Basic import *
from models.Inception_Net import *
from models.Le_Net import *
from utils.dlc_practical_prologue import *
from utils.Evaluate import *
from utils.grid_search import *
from utils.loader import *
from utils.metrics import *
from utils.plot import *
from utils.training import *
import argparse

if __name__ == "__main__":
    # Model container with default hyper-parameters; ten fixed seeds for repeatability.
    Nets_default = Nets()
    seeds = [1,2,3,4,5,6,7,8,9,10]
    # Run the evaluation with all augmentations (rotation, translation, channel swap)
    # enabled and plotting disabled — semantics of evaluate_model come from
    # utils.Evaluate (star import); TODO confirm against that module.
    train_results, test_losses,test_accuracies = evaluate_model(Nets_default.LeNet_sharing_aux, seeds , plot =False,
                                                                rotate = True,translate=True,swap_channel = True)
| 30.727273
| 117
| 0.683432
| 91
| 676
| 4.835165
| 0.549451
| 0.227273
| 0.238636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021195
| 0.232249
| 676
| 21
| 118
| 32.190476
| 0.82659
| 0
| 0
| 0
| 0
| 0
| 0.011834
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.705882
| 0
| 0.705882
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
7a685ed0387ff3acf55274c958313caca3731d6f
| 209
|
py
|
Python
|
ADVECTOR/enums/forcings.py
|
john-science/ADVECTOR
|
5c5ca7595c2c051f1a088b1f0e694936c3da3610
|
[
"MIT"
] | 7
|
2021-09-07T02:32:00.000Z
|
2022-01-15T11:35:02.000Z
|
ADVECTOR/enums/forcings.py
|
TheOceanCleanupAlgorithms/ADVECT
|
e27ce15da6a2fcbccbe363f8c2415b0122696d1f
|
[
"MIT"
] | 1
|
2021-12-24T15:16:26.000Z
|
2021-12-24T15:16:26.000Z
|
ADVECTOR/enums/forcings.py
|
TheOceanCleanupAlgorithms/ADVECT
|
e27ce15da6a2fcbccbe363f8c2415b0122696d1f
|
[
"MIT"
] | 1
|
2021-12-12T15:13:52.000Z
|
2021-12-12T15:13:52.000Z
|
from enum import Enum


class Forcing(Enum):
    """Physical forcing datasets.

    ``.name`` is the variable name; ``.value`` is the human-readable label.
    """

    current = "current"
    wind = "10-meter wind"
    seawater_density = "seawater density"
| 20.9
| 69
| 0.674641
| 27
| 209
| 5.185185
| 0.666667
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01227
| 0.220096
| 209
| 9
| 70
| 23.222222
| 0.846626
| 0.282297
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
7a6de493fc92b2d2c1df09e619cb6257aedb5b88
| 101
|
py
|
Python
|
predict_emotions.py
|
fahsan/EmotiFind
|
aa3071d296cb61a91eea5e0139a9cbeb82b7d14b
|
[
"MIT"
] | null | null | null |
predict_emotions.py
|
fahsan/EmotiFind
|
aa3071d296cb61a91eea5e0139a9cbeb82b7d14b
|
[
"MIT"
] | null | null | null |
predict_emotions.py
|
fahsan/EmotiFind
|
aa3071d296cb61a91eea5e0139a9cbeb82b7d14b
|
[
"MIT"
] | 1
|
2018-05-12T22:54:49.000Z
|
2018-05-12T22:54:49.000Z
|
#Takes as input a numpy array of 3 parameters per article and an output array of emotions per text.
| 33.666667
| 99
| 0.772277
| 19
| 101
| 4.105263
| 0.789474
| 0.179487
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0125
| 0.207921
| 101
| 2
| 100
| 50.5
| 0.9625
| 0.960396
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
7a7af5fdb993c98e171a62bd93ae82a0d3f1b8bd
| 291
|
py
|
Python
|
tests/globals/documents/misc_documents.py
|
RelevanceAI/RelevanceAI
|
a0542f35153d9c842f3d2cd0955d6b07f6dfc07b
|
[
"Apache-2.0"
] | 21
|
2021-11-23T13:01:36.000Z
|
2022-03-23T03:45:30.000Z
|
tests/globals/documents/misc_documents.py
|
RelevanceAI/RelevanceAI
|
a0542f35153d9c842f3d2cd0955d6b07f6dfc07b
|
[
"Apache-2.0"
] | 217
|
2021-11-23T00:11:01.000Z
|
2022-03-30T08:11:49.000Z
|
tests/globals/documents/misc_documents.py
|
RelevanceAI/RelevanceAI
|
a0542f35153d9c842f3d2cd0955d6b07f6dfc07b
|
[
"Apache-2.0"
] | 4
|
2022-01-04T01:48:30.000Z
|
2022-02-11T03:19:32.000Z
|
import pytest
from typing import List
from tests.globals.constants import NUMBER_OF_DOCUMENTS
from tests.globals.document import dataclass_document
@pytest.fixture(scope="session")
def dataclass_documents() -> List:
    """Session-scoped batch of freshly generated dataclass documents."""
    count = NUMBER_OF_DOCUMENTS
    return [dataclass_document() for _ in range(count)]
| 22.384615
| 69
| 0.80756
| 38
| 291
| 5.973684
| 0.552632
| 0.079295
| 0.140969
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116838
| 291
| 12
| 70
| 24.25
| 0.883268
| 0
| 0
| 0
| 0
| 0
| 0.024055
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| true
| 0
| 0.571429
| 0.142857
| 0.857143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 4
|
7a8f9fda09004030cc2c8b33de4dfe7521550783
| 261
|
py
|
Python
|
backend/config/settings/environments/__init__.py
|
offurface/logistic-company
|
1e98b1191fd9ee63fbd9d6c2eef1354822e53d14
|
[
"MIT"
] | null | null | null |
backend/config/settings/environments/__init__.py
|
offurface/logistic-company
|
1e98b1191fd9ee63fbd9d6c2eef1354822e53d14
|
[
"MIT"
] | null | null | null |
backend/config/settings/environments/__init__.py
|
offurface/logistic-company
|
1e98b1191fd9ee63fbd9d6c2eef1354822e53d14
|
[
"MIT"
] | null | null | null |
from .common import *
from ..components import env

# DEBUG is expected to come from .common via the star import above — TODO confirm.
if DEBUG:
    # Development: fall back to a hard-coded key so the project runs without env config.
    SECRET_KEY = env('SECRET_KEY', default='-qf)o7hs$jk@b8o)zidroo9wskuf^95m2$@k)5^@hl-=)349-7')
    from .development import *
else:
    # Production: no default — presumably env() fails if SECRET_KEY is unset; verify.
    SECRET_KEY = env('SECRET_KEY')
    from .production import *
| 26.1
| 96
| 0.685824
| 38
| 261
| 4.605263
| 0.631579
| 0.205714
| 0.137143
| 0.205714
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050459
| 0.164751
| 261
| 9
| 97
| 29
| 0.752294
| 0
| 0
| 0
| 0
| 0.125
| 0.268199
| 0.191571
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
7aa642c92098c5cae5a9ed0435a274be4705e211
| 245
|
py
|
Python
|
Lista09/ex013.py
|
Guilherme-Schwann/Listas-de-Exercicios-UFV-CCF-110
|
f306c8dc6385ee8c9580e687afa16a49ace68f95
|
[
"MIT"
] | 2
|
2021-09-05T22:29:33.000Z
|
2021-09-09T00:13:16.000Z
|
Lista09/ex013.py
|
Guilherme-Schwann/Listas-de-Exercicios-UFV-CCF-110
|
f306c8dc6385ee8c9580e687afa16a49ace68f95
|
[
"MIT"
] | null | null | null |
Lista09/ex013.py
|
Guilherme-Schwann/Listas-de-Exercicios-UFV-CCF-110
|
f306c8dc6385ee8c9580e687afa16a49ace68f95
|
[
"MIT"
] | null | null | null |
# Read an N x N integer matrix and print its transpose row by row.
N = int(input('Ordem da matriz: '))
A = [[int(input()) for i in range(N)] for j in range(N)]
# Element (i, j) of the transpose is element (j, i) of A.
At = [[A[j][i] for j in range(N)] for i in range(N)]
for i in range(N):
    print(At[i])
| 27.222222
| 56
| 0.542857
| 55
| 245
| 2.418182
| 0.272727
| 0.368421
| 0.421053
| 0.330827
| 0.661654
| 0.571429
| 0.571429
| 0.571429
| 0.571429
| 0.571429
| 0
| 0.005405
| 0.244898
| 245
| 8
| 57
| 30.625
| 0.713514
| 0
| 0
| 0.25
| 0
| 0
| 0.069388
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
7abefe6d36851192eaf9f176b148fc64e01f2ed3
| 156
|
py
|
Python
|
nets/__init__.py
|
zhuofalin/Pytorch_Mask_R-CNN
|
c940fb0f238cf75dca9e90c3f4433adee42650af
|
[
"Apache-2.0"
] | 2
|
2022-01-11T16:18:26.000Z
|
2022-01-23T05:56:42.000Z
|
utils/__init__.py
|
zhuofalin/Pytorch_Mask_R-CNN
|
c940fb0f238cf75dca9e90c3f4433adee42650af
|
[
"Apache-2.0"
] | null | null | null |
utils/__init__.py
|
zhuofalin/Pytorch_Mask_R-CNN
|
c940fb0f238cf75dca9e90c3f4433adee42650af
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project :mask_rcnn_pytorch
@File :__init__.py.py
@Author :zhuofalin
@Date :2021/11/24 21:18
'''
| 17.333333
| 28
| 0.615385
| 23
| 156
| 3.913043
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.100775
| 0.173077
| 156
| 8
| 29
| 19.5
| 0.596899
| 0.923077
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8f965f29b335fd5b6b8629d0ff9d0ab59a78971e
| 167
|
py
|
Python
|
10-Days-of-Statistics/Day 0/weighted-mean.py
|
vivekec/hackerrank
|
71b27a098f8432cd9ca13c6a93e9fd1c246dc8d3
|
[
"MIT"
] | null | null | null |
10-Days-of-Statistics/Day 0/weighted-mean.py
|
vivekec/hackerrank
|
71b27a098f8432cd9ca13c6a93e9fd1c246dc8d3
|
[
"MIT"
] | null | null | null |
10-Days-of-Statistics/Day 0/weighted-mean.py
|
vivekec/hackerrank
|
71b27a098f8432cd9ca13c6a93e9fd1c246dc8d3
|
[
"MIT"
] | null | null | null |
# Weighted mean of N values: sum(x*w) / sum(w), rounded to one decimal.
N = int(input())
X = list(map(int, input().split()))
W = list(map(int, input().split()))
weighted_sum = sum(X[i] * W[i] for i in range(N))
print(round(weighted_sum / sum(W), 1))
| 18.555556
| 35
| 0.550898
| 33
| 167
| 2.787879
| 0.545455
| 0.26087
| 0.217391
| 0.326087
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014493
| 0.173653
| 167
| 8
| 36
| 20.875
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8f98fada61d1cea79e871415ec49c42c654a446a
| 240
|
py
|
Python
|
code/python/tests/data/load/rvk_test.py
|
slub/docsa
|
c33a8243a60fbccbcd0a6418a59337e4ed39dc75
|
[
"Apache-2.0"
] | 11
|
2022-01-05T17:19:10.000Z
|
2022-02-14T18:57:37.000Z
|
code/python/tests/data/load/rvk_test.py
|
slub/docsa
|
c33a8243a60fbccbcd0a6418a59337e4ed39dc75
|
[
"Apache-2.0"
] | null | null | null |
code/python/tests/data/load/rvk_test.py
|
slub/docsa
|
c33a8243a60fbccbcd0a6418a59337e4ed39dc75
|
[
"Apache-2.0"
] | null | null | null |
"""Tests RVK data source."""
from slub_docsa.data.load.rvk import read_rvk_subjects


def test_rvk_first_level_classes():
    """Check that there are 34 first level classes in RVK."""
    first_level = list(read_rvk_subjects(depth=1))
    assert len(first_level) == 34
| 26.666667
| 61
| 0.733333
| 39
| 240
| 4.282051
| 0.692308
| 0.083832
| 0.179641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02439
| 0.145833
| 240
| 8
| 62
| 30
| 0.790244
| 0.308333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
8fb3efefb1acc2f84cef71783ae8bbf501a5f8f1
| 81
|
py
|
Python
|
day05/t05/apps.py
|
SunShuoJia/pyproject
|
71f3cada463fd90243b2cdac8c982fb622f9ef9c
|
[
"Apache-2.0"
] | null | null | null |
day05/t05/apps.py
|
SunShuoJia/pyproject
|
71f3cada463fd90243b2cdac8c982fb622f9ef9c
|
[
"Apache-2.0"
] | null | null | null |
day05/t05/apps.py
|
SunShuoJia/pyproject
|
71f3cada463fd90243b2cdac8c982fb622f9ef9c
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig


class T05Config(AppConfig):
    """Django application configuration for the ``t05`` app."""

    name = 't05'
| 13.5
| 33
| 0.728395
| 10
| 81
| 5.9
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 0.185185
| 81
| 5
| 34
| 16.2
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
8fd08194d8b2f0502b3827d5a45622a36c8a6856
| 80
|
py
|
Python
|
ideal/contrib/django/__init__.py
|
mvantellingen/python-ideal
|
fe1254ff8b8199791855b8467c80cca3563d6f68
|
[
"MIT"
] | 8
|
2018-02-12T10:22:57.000Z
|
2019-11-14T16:10:28.000Z
|
ideal/contrib/django/__init__.py
|
mvantellingen/python-ideal
|
fe1254ff8b8199791855b8467c80cca3563d6f68
|
[
"MIT"
] | 5
|
2018-02-12T09:15:46.000Z
|
2018-02-23T12:43:29.000Z
|
ideal/contrib/django/__init__.py
|
mvantellingen/python-ideal
|
fe1254ff8b8199791855b8467c80cca3563d6f68
|
[
"MIT"
] | 1
|
2018-02-15T12:29:05.000Z
|
2018-02-15T12:29:05.000Z
|
# Moved Django integration to a separate module to serve as Django's app name.
| 40
| 79
| 0.775
| 14
| 80
| 4.428571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 80
| 1
| 80
| 80
| 0.953846
| 0.95
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8fd10d4b55b2a58a75c683ed15c835d168e08322
| 89
|
py
|
Python
|
renting/apps.py
|
Ishikashah2510/nirvaas_main
|
5eaf92756d06261a7f555b10aad864a34c9e761b
|
[
"MIT"
] | null | null | null |
renting/apps.py
|
Ishikashah2510/nirvaas_main
|
5eaf92756d06261a7f555b10aad864a34c9e761b
|
[
"MIT"
] | null | null | null |
renting/apps.py
|
Ishikashah2510/nirvaas_main
|
5eaf92756d06261a7f555b10aad864a34c9e761b
|
[
"MIT"
] | 3
|
2020-12-30T11:35:22.000Z
|
2021-01-07T13:10:26.000Z
|
from django.apps import AppConfig


class RentingConfig(AppConfig):
    """Django application configuration for the ``renting`` app."""

    name = 'renting'
| 14.833333
| 33
| 0.752809
| 10
| 89
| 6.7
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168539
| 89
| 5
| 34
| 17.8
| 0.905405
| 0
| 0
| 0
| 0
| 0
| 0.078652
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
8fd15d8d08dc9ac65b46cdb43fd15e5581c97ffc
| 127
|
py
|
Python
|
stubs/asttokens/util.py
|
jamescooke/flake8-aaa
|
9df248e10538946531b67da4564bb229a91baece
|
[
"MIT"
] | 44
|
2018-04-08T21:25:43.000Z
|
2022-01-20T14:28:16.000Z
|
stubs/asttokens/util.py
|
jamescooke/flake8-aaa
|
9df248e10538946531b67da4564bb229a91baece
|
[
"MIT"
] | 72
|
2018-03-30T14:30:48.000Z
|
2022-03-31T16:18:16.000Z
|
stubs/asttokens/util.py
|
jamescooke/flake8-aaa
|
9df248e10538946531b67da4564bb229a91baece
|
[
"MIT"
] | 1
|
2018-10-17T18:49:25.000Z
|
2018-10-17T18:49:25.000Z
|
import collections

# Attribute names exposed by asttokens' Token, in positional order.
_FIELDS = 'type string start end line index startpos endpos'


class Token(collections.namedtuple('Token', _FIELDS)):
    """Type stub for asttokens' Token: a namedtuple of token attributes."""
| 21.166667
| 97
| 0.740157
| 15
| 127
| 6.266667
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149606
| 127
| 5
| 98
| 25.4
| 0.87037
| 0
| 0
| 0
| 0
| 0
| 0.417323
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
8fe9c1240cc746c7e927be39f2fe9727d0339c7f
| 88
|
py
|
Python
|
segme/metric/__init__.py
|
shkarupa-alex/segme
|
d5bc0043f9e709c8ccaf8949d662bc6fd6144006
|
[
"MIT"
] | 2
|
2021-05-25T18:53:00.000Z
|
2021-05-26T12:11:41.000Z
|
segme/metric/__init__.py
|
shkarupa-alex/segme
|
d5bc0043f9e709c8ccaf8949d662bc6fd6144006
|
[
"MIT"
] | null | null | null |
segme/metric/__init__.py
|
shkarupa-alex/segme
|
d5bc0043f9e709c8ccaf8949d662bc6fd6144006
|
[
"MIT"
] | 2
|
2021-11-21T02:39:37.000Z
|
2021-12-08T07:26:56.000Z
|
from .conn import Conn
from .grad import Grad
from .mse import MSE
from .sad import SAD
| 17.6
| 22
| 0.772727
| 16
| 88
| 4.25
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 88
| 4
| 23
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
890c561bf0aec0a3506eddd281bfa30bb906a86e
| 92
|
py
|
Python
|
greek_app/apps.py
|
HCDigitalScholarship/GreekPal
|
15a99b1dae971edef0cc90be064fb7cb291c317b
|
[
"MIT"
] | null | null | null |
greek_app/apps.py
|
HCDigitalScholarship/GreekPal
|
15a99b1dae971edef0cc90be064fb7cb291c317b
|
[
"MIT"
] | 28
|
2019-10-08T05:36:39.000Z
|
2022-01-13T02:54:15.000Z
|
greek_app/apps.py
|
HCDigitalScholarship/GreekPal
|
15a99b1dae971edef0cc90be064fb7cb291c317b
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig


class GreekAppConfig(AppConfig):
    """Django application configuration for the ``greek_app`` app."""

    name = 'greek_app'
| 15.333333
| 33
| 0.76087
| 11
| 92
| 6.272727
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163043
| 92
| 5
| 34
| 18.4
| 0.896104
| 0
| 0
| 0
| 0
| 0
| 0.097826
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
8f7b47f31150135672ffa4570db36af3997c8bdf
| 47
|
py
|
Python
|
biker_vasek.py
|
nikitadragaa/---
|
61cfdd2c078e221e2412a1e776ae8e9afb840562
|
[
"MIT"
] | 1
|
2020-11-26T19:12:09.000Z
|
2020-11-26T19:12:09.000Z
|
biker_vasek.py
|
nikitadragaa/informatics_first_module
|
61cfdd2c078e221e2412a1e776ae8e9afb840562
|
[
"MIT"
] | null | null | null |
biker_vasek.py
|
nikitadragaa/informatics_first_module
|
61cfdd2c078e221e2412a1e776ae8e9afb840562
|
[
"MIT"
] | null | null | null |
# Read two integers and print their product modulo 109.
first = int(input())
second = int(input())
print(first * second % 109)
| 11.75
| 16
| 0.595745
| 10
| 47
| 2.8
| 0.6
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068182
| 0.06383
| 47
| 3
| 17
| 15.666667
| 0.568182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8f8117fc5388acb6f8832bf7311ec1881c023df3
| 81
|
py
|
Python
|
pks/apps.py
|
xingyifei2016/clusterCAD
|
fb139edc90e3b963ac6bfc9f6890f0a4e4f356d6
|
[
"BSD-3-Clause-LBNL"
] | 7
|
2018-11-06T00:04:47.000Z
|
2021-08-05T04:37:12.000Z
|
pks/apps.py
|
xingyifei2016/clusterCAD
|
fb139edc90e3b963ac6bfc9f6890f0a4e4f356d6
|
[
"BSD-3-Clause-LBNL"
] | 26
|
2017-08-11T21:51:46.000Z
|
2022-03-11T23:18:25.000Z
|
pks/apps.py
|
xingyifei2016/clusterCAD
|
fb139edc90e3b963ac6bfc9f6890f0a4e4f356d6
|
[
"BSD-3-Clause-LBNL"
] | 7
|
2017-08-16T17:28:40.000Z
|
2022-03-02T00:07:00.000Z
|
from django.apps import AppConfig


class PksConfig(AppConfig):
    """Django application configuration for the ``pks`` app."""

    name = 'pks'
| 13.5
| 33
| 0.728395
| 10
| 81
| 5.9
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 81
| 5
| 34
| 16.2
| 0.893939
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
56e0b3bab19585c01401fc4f7a552420d5771661
| 4,066
|
py
|
Python
|
tsai/models/ROCKET.py
|
radi-cho/tsai
|
32f24d55ee58df1a14d1e68618f230097a266c77
|
[
"Apache-2.0"
] | 1
|
2022-01-02T18:21:27.000Z
|
2022-01-02T18:21:27.000Z
|
tsai/models/ROCKET.py
|
radi-cho/tsai
|
32f24d55ee58df1a14d1e68618f230097a266c77
|
[
"Apache-2.0"
] | 31
|
2021-12-01T23:08:51.000Z
|
2021-12-29T02:59:49.000Z
|
tsai/models/ROCKET.py
|
radi-cho/tsai
|
32f24d55ee58df1a14d1e68618f230097a266c77
|
[
"Apache-2.0"
] | 1
|
2022-03-13T16:47:04.000Z
|
2022-03-13T16:47:04.000Z
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/111_models.ROCKET.ipynb (unless otherwise specified).
__all__ = ['RocketClassifier', 'load_rocket', 'RocketRegressor']
# Cell
import sklearn
from sklearn.linear_model import RidgeClassifierCV, RidgeCV
from sklearn.metrics import make_scorer
from ..imports import *
from ..data.external import *
from .layers import *
warnings.filterwarnings("ignore", category=FutureWarning)
# Cell
class RocketClassifier(sklearn.pipeline.Pipeline):
    """Time series classification using ROCKET features and a linear classifier"""

    def __init__(self, num_kernels=10_000, normalize_input=True, random_state=None,
                 alphas=np.logspace(-3, 3, 7), normalize_features=True, memory=None, verbose=False, scoring=None, class_weight=None, **kwargs):
        """
        RocketClassifier is recommended for up to 10k time series.
        For a larger dataset, you can use ROCKET (in Pytorch).
        scoring = None --> defaults to accuracy.

        Rocket args:
            num_kernels     : int, number of random convolutional kernels (default 10,000)
            normalize_input : boolean, whether or not to normalise the input time series per instance (default True)
            random_state    : Optional random seed (default None)
        """
        try:
            import sktime
            from sktime.transformations.panel.rocket import Rocket
        except ImportError:
            # NOTE(review): this only prints; Rocket stays undefined and the next
            # statement raises NameError. Kept as-is to preserve existing behavior.
            print("You need to install sktime to be able to use RocketClassifier")
        self.steps = [('rocket', Rocket(num_kernels=num_kernels, normalise=normalize_input, random_state=random_state)),
                      ('ridgeclassifiercv', RidgeClassifierCV(alphas=alphas, normalize=normalize_features, scoring=scoring,
                                                              class_weight=class_weight, **kwargs))]
        store_attr()
        self._validate_steps()

    def __repr__(self):
        return f'Pipeline(steps={self.steps.copy()})'

    def save(self, fname='Rocket', path='./models'):
        """Pickle this pipeline to ``<path>/<fname>.pkl``."""
        path = Path(path)
        filename = path/fname
        # Bug fix: the open() call used a placeholder-free literal file name, so
        # `filename` was never used and every model was written to the same file.
        with open(f'{filename}.pkl', 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
# Cell
def load_rocket(fname='Rocket', path='./models'):
    """Load a pickled Rocket pipeline from ``<path>/<fname>.pkl``.

    :param fname: base file name without the ``.pkl`` extension.
    :param path: directory containing the pickle file.
    :return: the unpickled object.
    """
    path = Path(path)
    filename = path/fname
    # Bug fix: the open() call used a placeholder-free literal file name, so
    # `filename` was never used. Also renamed the context variable so it no
    # longer shadows the `input` builtin.
    with open(f'{filename}.pkl', 'rb') as infile:
        output = pickle.load(infile)
    return output
# Cell
class RocketRegressor(sklearn.pipeline.Pipeline):
    """Time series regression using ROCKET features and a linear regressor"""

    def __init__(self, num_kernels=10_000, normalize_input=True, random_state=None,
                 alphas=np.logspace(-3, 3, 7), normalize_features=True, memory=None, verbose=False, scoring=None, **kwargs):
        """
        RocketRegressor is recommended for up to 10k time series.
        For a larger dataset, you can use ROCKET (in Pytorch).
        scoring = None --> defaults to r2.

        Args:
            num_kernels     : int, number of random convolutional kernels (default 10,000)
            normalize_input : boolean, whether or not to normalise the input time series per instance (default True)
            random_state    : Optional random seed (default None)
        """
        try:
            import sktime
            from sktime.transformations.panel.rocket import Rocket
        except ImportError:
            # NOTE(review): this only prints; Rocket stays undefined and the next
            # statement raises NameError. Kept as-is to preserve existing behavior.
            print("You need to install sktime to be able to use RocketRegressor")
        self.steps = [('rocket', Rocket(num_kernels=num_kernels, normalise=normalize_input, random_state=random_state)),
                      ('ridgecv', RidgeCV(alphas=alphas, normalize=normalize_features, scoring=scoring, **kwargs))]
        store_attr()
        self._validate_steps()

    def __repr__(self):
        return f'Pipeline(steps={self.steps.copy()})'

    def save(self, fname='Rocket', path='./models'):
        """Pickle this pipeline to ``<path>/<fname>.pkl``."""
        path = Path(path)
        filename = path/fname
        # Bug fix: the open() call used a placeholder-free literal file name, so
        # `filename` was never used and every model was written to the same file.
        with open(f'{filename}.pkl', 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
| 42.8
| 143
| 0.657649
| 479
| 4,066
| 5.448852
| 0.2881
| 0.030651
| 0.021456
| 0.029119
| 0.748659
| 0.723372
| 0.701149
| 0.661303
| 0.661303
| 0.661303
| 0
| 0.011039
| 0.242499
| 4,066
| 95
| 144
| 42.8
| 0.836364
| 0.266847
| 0
| 0.603774
| 1
| 0
| 0.129249
| 0.024788
| 0
| 0
| 0
| 0
| 0
| 1
| 0.132075
| false
| 0
| 0.226415
| 0.037736
| 0.45283
| 0.037736
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
56fd5170b7684adf06467bcddc3847c7bea4ee50
| 3,438
|
py
|
Python
|
DailyProgrammer/DP20150422B.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | 2
|
2020-12-23T18:59:22.000Z
|
2021-04-14T13:16:09.000Z
|
DailyProgrammer/DP20150422B.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
DailyProgrammer/DP20150422B.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
"""
[2015-04-22] Challenge #211 [Intermediate] Ogre Maze
https://www.reddit.com/r/dailyprogrammer/comments/33hwwf/20150422_challenge_211_intermediate_ogre_maze/
#Description:
Today we are going to solve a maze. What? Again? Come on, Simpsons did it. Yah okay so we always pick a hero to walk a
maze. This time our hero is an Ogre.
An ogre is large. Your run of the mill hero "@" takes up a 1x1 spot. Easy. But our beloved hero today is an ogre.
@@
@@
Ogres take up a 2x2 space instead of a 1x1. This makes navigating a maze tougher as you have to handle the bigger ogre.
So I will give you a layout of a swamp. (Ogres navigate swamps while puny heroes navigate caves. That's the unwritten
rules of maze challenges) You will find the path (if possible) for
the ogre to walk to his gold.
#Input:
You will read in a swamp. The swamp is laid out in 10x10 spaces. Each space can be the following:
* . - empty spot
* @ - 1/4th of the 2x2 ogre
* $ - the ogre's gold
* O - sink hole - the ogre cannot touch these. All 2x2 of the Ogre manages to fall down one of these (even if it is a
1x1 spot too. Don't be bothered by this - think of it as a "wall" but in a swamp we call them sink holes)
#Output:
You will navigate the swamp. If you find a path you will display the solution of all the spaces the ogre will occupy to
get to his gold. Use a "&" symbol to show the muddy path created by the ogre to reach his gold. If there is no path at
all then you will output "No Path"
#Example Input 1:
@@........
@@O.......
.....O.O..
..........
..O.O.....
..O....O.O
.O........
..........
.....OO...
.........$
#Example Output 1:
&&.&&&&&&&
&&O&&&&&&&
&&&&&O.O&&
&&&&&&&&&&
..O.O&&&&&
..O..&&O.O
.O...&&&&.
.....&&&&.
.....OO&&&
.......&&&
#Example Input 2:
@@........
@@O.......
.....O.O..
..........
..O.O.....
..O....O.O
.O........
..........
.....OO.O.
.........$
#Example Output 2:
No Path
#FAQ (Will update with answers here)
* Q: Does path have to be shortest Path.
* A: No.
### -
* Q: There could be a few different paths. Which one do I output?
* A: The first one that works. Answers will vary based on how people solve it.
### -
* Q: My output should show all the spots the Ogre moves to or just the optimal path?
* A: The ogre will hit dead ends. But only show the optimal path and not all his dead ends. Think of this as a GPS
Tom-Tom guide for the Ogre so he uses the program to find his gold. TIL Ogres subscribe to /r/dailyprogrammer. (And use
the internet....)
#Challenge Input 1:
$.O...O...
...O......
..........
O..O..O...
..........
O..O..O...
..........
......OO..
O..O....@@
........@@
#Challenge Input 2:
.@@.....O.
.@@.......
..O..O....
.......O..
...O......
..........
.......O.O
...O.O....
.......O..
.........$
#Bonus:
For those seeking more challenge. Instead of using input swamps you will generate a swamp. Place the Ogre randomly.
Place his gold randomly. Generate sinkholes based on the size of the swamp.
For example you are given N for a NxN swamp to generate. Generate a random swamp and apply your solution to it. The
exact design/algorithm for random generation I leave it for you to tinker with. I suggest start with like 15% of the
swamp spots are sinkholes and go up or down based on your results. (So you get paths and not always No Path)
"""
def solve_ogre_maze(swamp):
    """Solve the 2x2 "ogre" maze described in the module docstring.

    Args:
        swamp: list of equal-length row strings made of '.', 'O' (sinkhole),
            '@' (the 2x2 ogre) and '$' (the gold).

    Returns:
        The string "No Path" when the gold is unreachable; otherwise a list
        of row strings in which every square the ogre occupied on its way to
        the gold is marked '&' (sinkholes stay 'O', untouched swamp stays '.').
    """
    grid = [list(row) for row in swamp]
    rows, cols = len(grid), len(grid[0])

    def fits(r, c):
        # True when the ogre's 2x2 body, with top-left corner at (r, c),
        # is fully inside the swamp and touches no sinkhole.
        if r < 0 or c < 0 or r + 1 >= rows or c + 1 >= cols:
            return False
        return all(grid[r + dr][c + dc] != 'O'
                   for dr in (0, 1) for dc in (0, 1))

    start = gold = None
    for r in range(rows):
        for c in range(cols):
            if grid[r][c] == '@' and start is None:
                start = (r, c)  # topmost-leftmost '@' is the ogre's corner
            elif grid[r][c] == '$':
                gold = (r, c)
    if start is None or gold is None or not fits(*start):
        return "No Path"

    def covers_gold(r, c):
        # The ogre "reaches" the gold when any of its four squares sits on it.
        return r <= gold[0] <= r + 1 and c <= gold[1] <= c + 1

    # Breadth-first search over top-left corner positions (one step = one cell).
    parent = {start: None}
    queue = [start]
    head = 0
    goal = start if covers_gold(*start) else None
    while goal is None and head < len(queue):
        r, c = queue[head]
        head += 1
        for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
            if (nr, nc) not in parent and fits(nr, nc):
                parent[(nr, nc)] = (r, c)
                if covers_gold(nr, nc):
                    goal = (nr, nc)
                    break
                queue.append((nr, nc))
    if goal is None:
        return "No Path"

    # Walk the parent chain back to the start, stamping the ogre's 2x2 body.
    pos = goal
    while pos is not None:
        r, c = pos
        for dr in (0, 1):
            for dc in (0, 1):
                grid[r + dr][c + dc] = '&'
        pos = parent[pos]
    return [''.join(row) for row in grid]


def main():
    """Read a swamp from stdin (one row per line) and print the solution."""
    import sys
    swamp = [line.strip() for line in sys.stdin if line.strip()]
    result = solve_ogre_maze(swamp)
    print(result if result == "No Path" else "\n".join(result))


if __name__ == "__main__":
    main()
| 31.254545
| 119
| 0.617219
| 577
| 3,438
| 3.655113
| 0.381283
| 0.039829
| 0.051209
| 0.058796
| 0.075391
| 0.045045
| 0.045045
| 0.045045
| 0.045045
| 0.044571
| 0
| 0.018275
| 0.204188
| 3,438
| 109
| 120
| 31.541284
| 0.752558
| 1.058464
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
7122a8e74eb4212d1e6aaea222109cbc1f0c061a
| 18,717
|
py
|
Python
|
tests/src/python/test_qgssearchwidgetwrapper.py
|
dyna-mis/Hilabeling
|
cb7d5d4be29624a20c8a367162dbc6fd779b2b52
|
[
"MIT"
] | null | null | null |
tests/src/python/test_qgssearchwidgetwrapper.py
|
dyna-mis/Hilabeling
|
cb7d5d4be29624a20c8a367162dbc6fd779b2b52
|
[
"MIT"
] | null | null | null |
tests/src/python/test_qgssearchwidgetwrapper.py
|
dyna-mis/Hilabeling
|
cb7d5d4be29624a20c8a367162dbc6fd779b2b52
|
[
"MIT"
] | 1
|
2021-12-25T08:40:30.000Z
|
2021-12-25T08:40:30.000Z
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsSearchWidgetWrapper.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '2016-05'
__copyright__ = 'Copyright 2016, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
import qgis # NOQA
from qgis.gui import (QgsSearchWidgetWrapper,
QgsDefaultSearchWidgetWrapper,
QgsValueMapSearchWidgetWrapper,
QgsValueRelationSearchWidgetWrapper,
QgsCheckboxSearchWidgetWrapper,
QgsDateTimeSearchWidgetWrapper)
from qgis.core import (QgsVectorLayer,
QgsFeature,
QgsProject,
)
from qgis.PyQt.QtCore import QDateTime, QDate, QTime
from qgis.PyQt.QtWidgets import QWidget
from qgis.testing import start_app, unittest
start_app()
class PyQgsSearchWidgetWrapper(unittest.TestCase):
    """Generic tests for the QgsSearchWidgetWrapper filter-flag API."""

    def testFlagToString(self):
        """Every FilterFlag must map to a non-empty display string."""
        flags = (
            QgsSearchWidgetWrapper.EqualTo,
            QgsSearchWidgetWrapper.NotEqualTo,
            QgsSearchWidgetWrapper.GreaterThan,
            QgsSearchWidgetWrapper.LessThan,
            QgsSearchWidgetWrapper.GreaterThanOrEqualTo,
            QgsSearchWidgetWrapper.LessThanOrEqualTo,
            QgsSearchWidgetWrapper.Between,
            QgsSearchWidgetWrapper.CaseInsensitive,
            QgsSearchWidgetWrapper.Contains,
            QgsSearchWidgetWrapper.DoesNotContain,
            QgsSearchWidgetWrapper.IsNull,
            QgsSearchWidgetWrapper.IsNotNull,
            QgsSearchWidgetWrapper.IsNotBetween,
        )
        for flag in flags:
            self.assertTrue(len(QgsSearchWidgetWrapper.toString(flag)) > 0)

    def testExclusiveFlags(self):
        """No flag may be reported as both exclusive and non-exclusive."""
        exclusive_flags = QgsSearchWidgetWrapper.exclusiveFilterFlags()
        non_exclusive_flags = QgsSearchWidgetWrapper.nonExclusiveFilterFlags()
        for flag in exclusive_flags:
            self.assertFalse(flag in non_exclusive_flags)
class PyQgsDefaultSearchWidgetWrapper(unittest.TestCase):
    """Tests for the filter expressions built by QgsDefaultSearchWidgetWrapper."""

    def testCreateExpression(self):
        """ Test creating an expression using the widget"""
        # Memory layer with a text, an integer and a datetime field, so each
        # field-type code path of the wrapper is exercised below.
        layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer&field=flddate:datetime",
                               "test", "memory")
        parent = QWidget()
        # Field 0: text field.
        w = QgsDefaultSearchWidgetWrapper(layer, 0)
        w.initWidget(parent)
        line_edit = w.lineEdit()
        line_edit.setText('test')
        case_sensitive = w.caseSensitiveCheckBox()
        case_sensitive.setChecked(False)
        # Case-insensitive equality wraps both sides in lower().
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), 'lower("fldtxt")=lower(\'test\')')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), 'lower("fldtxt")<>lower(\'test\')')
        case_sensitive.setChecked(True)
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'test\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldtxt"<>\'test\'')
        case_sensitive.setChecked(False)
        # Substring tests use ILIKE when case-insensitive, LIKE when sensitive.
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.Contains), '"fldtxt" ILIKE \'%test%\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.DoesNotContain), 'NOT ("fldtxt" ILIKE \'%test%\')')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.StartsWith), '"fldtxt" ILIKE \'test%\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EndsWith), '"fldtxt" ILIKE \'%test\'')
        case_sensitive.setChecked(True)
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.Contains), '"fldtxt" LIKE \'%test%\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.DoesNotContain), 'NOT ("fldtxt" LIKE \'%test%\')')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.StartsWith), '"fldtxt" LIKE \'test%\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EndsWith), '"fldtxt" LIKE \'%test\'')
        case_sensitive.setChecked(False)
        # numeric field
        parent = QWidget()
        w = QgsDefaultSearchWidgetWrapper(layer, 1)
        w.initWidget(parent)
        # may need updating if widget layout changes:
        line_edit = w.lineEdit()
        line_edit.setText('5.5')
        # Numeric values are emitted unquoted.
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=5.5')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldint"<>5.5')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThan), '"fldint">5.5')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThan), '"fldint"<5.5')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThanOrEqualTo), '"fldint">=5.5')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThanOrEqualTo), '"fldint"<=5.5')
        # date/time/datetime
        parent = QWidget()
        w = QgsDefaultSearchWidgetWrapper(layer, 2)
        w.initWidget(parent)
        # may need updating if widget layout changes:
        line_edit = w.lineEdit()
        line_edit.setText('2015-06-03')
        # Date values are quoted like strings.
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"flddate"=\'2015-06-03\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"flddate"<>\'2015-06-03\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThan), '"flddate">\'2015-06-03\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThan), '"flddate"<\'2015-06-03\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThanOrEqualTo), '"flddate">=\'2015-06-03\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThanOrEqualTo), '"flddate"<=\'2015-06-03\'')
class PyQgsValueMapSearchWidgetWrapper(unittest.TestCase):
    """Tests for the filter expressions built by QgsValueMapSearchWidgetWrapper."""

    def testCreateExpression(self):
        """ Test creating an expression using the widget"""
        layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer", "test", "memory")
        w = QgsValueMapSearchWidgetWrapper(layer, 0)
        # Value map: displayed text -> stored value.
        config = {"map": [{"val1": 1},
                          {"val2": 200}]}
        w.setConfig(config)
        c = w.widget()
        # first, set it to the "select value" item
        c.setCurrentIndex(0)
        # With no value selected only the NULL checks yield an expression.
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '')
        c.setCurrentIndex(1)
        # Stored values are quoted for a text field.
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'1\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldtxt"<>\'1\'')
        c.setCurrentIndex(2)
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'200\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldtxt"<>\'200\'')
        # try with numeric field
        w = QgsValueMapSearchWidgetWrapper(layer, 1)
        w.setConfig(config)
        c = w.widget()
        c.setCurrentIndex(1)
        # Numeric fields get unquoted values.
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldint" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldint" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=1')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldint"<>1')
class PyQgsValueRelationSearchWidgetWrapper(unittest.TestCase):
    """Tests for the filter expressions built by QgsValueRelationSearchWidgetWrapper."""

    def testCreateExpression(self):
        """ Test creating an expression using the widget"""
        layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer", "test", "memory")
        # setup value relation: a parent layer providing key/display pairs
        parent_layer = QgsVectorLayer("Point?field=stringkey:string&field=intkey:integer&field=display:string", "parent", "memory")
        f1 = QgsFeature(parent_layer.fields(), 1)
        f1.setAttributes(['a', 1, 'value a'])
        f2 = QgsFeature(parent_layer.fields(), 2)
        f2.setAttributes(['b', 2, 'value b'])
        f3 = QgsFeature(parent_layer.fields(), 3)
        f3.setAttributes(['c', 3, 'value c'])
        parent_layer.dataProvider().addFeatures([f1, f2, f3])
        QgsProject.instance().addMapLayers([layer, parent_layer])
        config = {"Layer": parent_layer.id(),
                  "Key": 'stringkey',
                  "Value": 'display'}
        w = QgsValueRelationSearchWidgetWrapper(layer, 0)
        w.setConfig(config)
        c = w.widget()
        # first, set it to the "select value" item
        c.setCurrentIndex(0)
        # With no value selected only the NULL checks yield an expression.
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '')
        c.setCurrentIndex(1)
        # The *key* (not the display text) ends up in the expression.
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'a\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldtxt"<>\'a\'')
        c.setCurrentIndex(2)
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'b\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldtxt"<>\'b\'')
        # try with numeric field
        w = QgsValueRelationSearchWidgetWrapper(layer, 1)
        config['Key'] = 'intkey'
        w.setConfig(config)
        c = w.widget()
        c.setCurrentIndex(c.findText('value c'))
        self.assertEqual(c.currentIndex(), 3)
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldint" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldint" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=3')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldint"<>3')
        # try with allow null set
        w = QgsValueRelationSearchWidgetWrapper(layer, 1)
        config['AllowNull'] = True
        w.setConfig(config)
        c = w.widget()
        c.setCurrentIndex(c.findText('value c'))
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldint" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldint" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=3')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldint"<>3')
        # try with line edit (completer mode); was `l`, renamed to avoid the
        # ambiguous single-letter name (E741)
        w = QgsValueRelationSearchWidgetWrapper(layer, 1)
        config['UseCompleter'] = True
        w.setConfig(config)
        line_edit = w.widget()
        line_edit.setText('value b')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldint" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldint" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=2')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"fldint"<>2')
class PyQgsCheckboxSearchWidgetWrapper(unittest.TestCase):
    """Tests for the filter expressions built by QgsCheckboxSearchWidgetWrapper."""

    def testCreateExpression(self):
        """ Test creating an expression using the widget"""
        layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer", "test", "memory")
        w = QgsCheckboxSearchWidgetWrapper(layer, 0)
        # Values stored for the checked / unchecked checkbox states.
        config = {"CheckedState": 5,
                  "UncheckedState": 9}
        w.setConfig(config)
        c = w.widget()
        # first check with string field type
        c.setChecked(True)
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'5\'')
        c.setChecked(False)
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldtxt" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldtxt" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldtxt"=\'9\'')
        # try with numeric field
        w = QgsCheckboxSearchWidgetWrapper(layer, 1)
        w.setConfig(config)
        c = w.widget()
        c.setChecked(True)
        # Numeric fields get unquoted state values.
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldint" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldint" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=5')
        c.setChecked(False)
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"fldint" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"fldint" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"fldint"=9')
class PyQgsDateTimeSearchWidgetWrapper(unittest.TestCase):
    """Tests for the filter expressions built by QgsDateTimeSearchWidgetWrapper."""

    def testCreateExpression(self):
        """ Test creating an expression using the widget"""
        # Memory layer with one field of each temporal type.
        layer = QgsVectorLayer("Point?field=date:date&field=time:time&field=datetime:datetime", "test", "memory")
        w = QgsDateTimeSearchWidgetWrapper(layer, 0)
        config = {"field_format": 'yyyy-MM-dd',
                  "display_format": 'yyyy-MM-dd'}
        w.setConfig(config)
        c = w.widget()
        # first check with date field type
        c.setDateTime(QDateTime(QDate(2013, 4, 5), QTime()))
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"date" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"date" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"date"=\'2013-04-05\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"date"<>\'2013-04-05\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThan), '"date">\'2013-04-05\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThan), '"date"<\'2013-04-05\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThanOrEqualTo), '"date">=\'2013-04-05\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThanOrEqualTo), '"date"<=\'2013-04-05\'')
        # time field type
        w = QgsDateTimeSearchWidgetWrapper(layer, 1)
        config = {"field_format": 'HH:mm:ss',
                  "display_format": 'HH:mm:ss'}
        w.setConfig(config)
        c = w.widget()
        # per the HH:mm:ss format only the time part is expected in the output
        c.setDateTime(QDateTime(QDate(2013, 4, 5), QTime(13, 14, 15)))
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"time" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"time" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"time"=\'13:14:15\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"time"<>\'13:14:15\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThan), '"time">\'13:14:15\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThan), '"time"<\'13:14:15\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThanOrEqualTo), '"time">=\'13:14:15\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThanOrEqualTo), '"time"<=\'13:14:15\'')
        # datetime field type
        w = QgsDateTimeSearchWidgetWrapper(layer, 2)
        config = {"field_format": 'yyyy-MM-dd HH:mm:ss',
                  "display_format": 'yyyy-MM-dd HH:mm:ss'}
        w.setConfig(config)
        c = w.widget()
        c.setDateTime(QDateTime(QDate(2013, 4, 5), QTime(13, 14, 15)))
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNull), '"datetime" IS NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.IsNotNull), '"datetime" IS NOT NULL')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.EqualTo), '"datetime"=\'2013-04-05 13:14:15\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.NotEqualTo), '"datetime"<>\'2013-04-05 13:14:15\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThan), '"datetime">\'2013-04-05 13:14:15\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThan), '"datetime"<\'2013-04-05 13:14:15\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.GreaterThanOrEqualTo), '"datetime">=\'2013-04-05 13:14:15\'')
        self.assertEqual(w.createExpression(QgsSearchWidgetWrapper.LessThanOrEqualTo), '"datetime"<=\'2013-04-05 13:14:15\'')
# Run this test module's cases directly (outside the QGIS test harness).
if __name__ == '__main__':
    unittest.main()
| 54.095376
| 131
| 0.689534
| 1,763
| 18,717
| 7.290414
| 0.133863
| 0.120205
| 0.126974
| 0.253948
| 0.769315
| 0.728857
| 0.705905
| 0.651988
| 0.573563
| 0.435307
| 0
| 0.024355
| 0.18176
| 18,717
| 345
| 132
| 54.252174
| 0.814887
| 0.061228
| 0
| 0.382239
| 0
| 0
| 0.124929
| 0.024266
| 0
| 0
| 0
| 0
| 0.405405
| 1
| 0.027027
| false
| 0
| 0.023166
| 0
| 0.073359
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
712af4c8d551dfa14800db9ae1b75dfc39f56d2e
| 3,692
|
py
|
Python
|
eval_model.py
|
yaojin17/adversarial-project
|
76af16f126ae701fb3a0a83152b37cbec5e7b28f
|
[
"Apache-2.0"
] | null | null | null |
eval_model.py
|
yaojin17/adversarial-project
|
76af16f126ae701fb3a0a83152b37cbec5e7b28f
|
[
"Apache-2.0"
] | null | null | null |
eval_model.py
|
yaojin17/adversarial-project
|
76af16f126ae701fb3a0a83152b37cbec5e7b28f
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from utils import prepare_cifar
from fgsm_attack import FGSMAttack
from tqdm import tqdm, trange
from pgd20 import pgd20_attack
from model import get_model_for_attack
def eval_model(model, test_loader, device):
    """Evaluate clean (natural) accuracy of ``model`` over ``test_loader``.

    Args:
        model: classifier returning per-class logits.
        test_loader: iterable of (images, labels) batches.
        device: torch device the batches are moved to.

    Returns:
        (natural_acc, distance): scalar tensor with the mean accuracy, and an
        empty list kept so callers can unpack uniformly with the adversarial
        evaluation helpers below.
    """
    correct = []
    # No adversarial examples here, so no perturbation distances to report.
    distance = []
    with trange(10000) as pbar:
        for x, label in test_loader:
            x, label = x.to(device), label.to(device)
            model.eval()
            with torch.no_grad():
                output = model(x)
            pred = output.argmax(dim=1)
            correct.append(pred == label)
            pbar.set_description(f"Acc: {torch.cat(correct).float().mean():.5f}")
            pbar.update(x.shape[0])
    natural_acc = torch.cat(correct).float().mean()
    return natural_acc, distance
def eval_model_pgd(model, test_loader, device, step_size, epsilon, perturb_steps):
    """Evaluate clean and robust accuracy of ``model`` under a PGD-20 attack.

    Args:
        model: classifier returning per-class logits.
        test_loader: iterable of (images, labels) batches.
        device: torch device the batches are moved to.
        step_size: PGD step size, forwarded to ``pgd20_attack``.
        epsilon: PGD perturbation budget, forwarded to ``pgd20_attack``.
        perturb_steps: number of PGD iterations, forwarded to ``pgd20_attack``.

    Returns:
        (natural_acc, robust_acc, distance): mean clean accuracy, mean
        adversarial accuracy, and the maximum L-inf distance observed between
        clean and adversarial images.
    """
    correct_adv, correct = [], []
    distance = []
    num = 0
    # trange(10000) assumes a 10k-sample test set (e.g. CIFAR-10) -- the bar
    # total is cosmetic only.
    with trange(10000) as pbar:
        for x, label in test_loader:
            x, label = x.to(device), label.to(device)
            batch, c, h, w = x.shape
            # Clone so the attack cannot mutate the clean batch in place.
            x_adv = pgd20_attack(model, x.clone(), label.clone(), step_size, epsilon, perturb_steps)
            x_adv = x_adv.to(device)
            model.eval()
            with torch.no_grad():
                output = model(x)
                output_adv = model(x_adv)
            # Largest per-sample L-inf perturbation in this batch.
            distance.append(torch.max((x - x_adv).reshape(batch, -1).abs(), dim=1)[0])
            pred = output.argmax(dim=1)
            pred_adv = output_adv.argmax(dim=1)
            correct.append(pred == label)
            correct_adv.append(pred_adv == label)
            num += x.shape[0]
            pbar.set_description(
                f"Acc: {torch.cat(correct).float().mean():.5f}, Robust Acc:{torch.cat(correct_adv).float().mean():.5f}")
            pbar.update(x.shape[0])
    natural_acc = torch.cat(correct).float().mean()
    robust_acc = torch.cat(correct_adv).float().mean()
    distance = torch.cat(distance).max()
    return natural_acc, robust_acc, distance
def eval_model_with_attack(model, test_loader, attack, epsilon, device):
    """Evaluate clean and robust accuracy of ``model`` under ``attack``.

    Args:
        model: classifier returning per-class logits.
        test_loader: iterable of (images, labels) batches.
        attack: callable ``attack(model, x, label) -> x_adv`` producing
            adversarial examples.
        epsilon: perturbation budget; kept in the signature for interface
            compatibility -- the attack itself is expected to enforce it.
        device: torch device the batches are moved to.

    Returns:
        (natural_acc, robust_acc, distance): mean clean accuracy, mean
        adversarial accuracy, and the maximum L-inf distance observed
        between clean and adversarial images.
    """
    correct_adv, correct = [], []
    distance = []
    with trange(10000) as pbar:
        for x, label in test_loader:
            x, label = x.to(device), label.to(device)
            batch = x.shape[0]
            # Clone so the attack cannot mutate the clean batch in place.
            x_adv = attack(model, x.clone(), label.clone())
            # Keep adversarial images in the valid pixel range.
            x_adv = x_adv.clamp(0, 1)
            x_adv = x_adv.to(device)
            model.eval()
            with torch.no_grad():
                output = model(x)
                output_adv = model(x_adv)
            # Largest per-sample L-inf perturbation in this batch.
            distance.append(torch.max((x - x_adv).reshape(batch, -1).abs(), dim=1)[0])
            pred = output.argmax(dim=1)
            pred_adv = output_adv.argmax(dim=1)
            correct.append(pred == label)
            correct_adv.append(pred_adv == label)
            pbar.set_description(
                f"Acc: {torch.cat(correct).float().mean():.5f}, Robust Acc:{torch.cat(correct_adv).float().mean():.5f}")
            pbar.update(batch)
    natural_acc = torch.cat(correct).float().mean()
    robust_acc = torch.cat(correct_adv).float().mean()
    distance = torch.cat(distance).max()
    return natural_acc, robust_acc, distance
| 38.458333
| 120
| 0.578548
| 499
| 3,692
| 4.136273
| 0.152305
| 0.031008
| 0.053295
| 0.087209
| 0.791667
| 0.743702
| 0.728198
| 0.670058
| 0.670058
| 0.652616
| 0
| 0.018961
| 0.285753
| 3,692
| 95
| 121
| 38.863158
| 0.763747
| 0.03467
| 0
| 0.764706
| 0
| 0.023529
| 0.068539
| 0.059831
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035294
| false
| 0
| 0.117647
| 0
| 0.188235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8531b1ab7623355d9bad7b738e3d301375faa339
| 75
|
py
|
Python
|
Raffle/__init__.py
|
duanegtr/legendv3-cogs
|
ffde1452a75ad42b4f6511b612ce486e96fcd6de
|
[
"MIT"
] | 10
|
2020-05-25T13:32:30.000Z
|
2022-02-01T12:33:07.000Z
|
Raffle/__init__.py
|
darcyle/tl-cogs
|
6b13c4a6247115571c5a2bb6ea98ed1fe2d44d79
|
[
"MIT"
] | 2
|
2020-05-23T22:53:07.000Z
|
2020-08-09T11:28:12.000Z
|
Raffle/__init__.py
|
darcyle/tl-cogs
|
6b13c4a6247115571c5a2bb6ea98ed1fe2d44d79
|
[
"MIT"
] | 7
|
2020-05-18T17:37:33.000Z
|
2022-01-13T04:08:05.000Z
|
from .raffle import Raffle
def setup(bot):
    """Bot-load hook: instantiate the Raffle cog and attach it to ``bot``."""
    cog = Raffle(bot)
    bot.add_cog(cog)
| 18.75
| 28
| 0.693333
| 12
| 75
| 4.25
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186667
| 75
| 4
| 28
| 18.75
| 0.836066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
8531c7eec28be5dec06698cc6a230d91cb72ae17
| 88
|
py
|
Python
|
tritimap/__init__.py
|
zwbao/Triti-Map
|
58d79d773df3862b6e03717bf3563d8c427c7027
|
[
"MIT"
] | null | null | null |
tritimap/__init__.py
|
zwbao/Triti-Map
|
58d79d773df3862b6e03717bf3563d8c427c7027
|
[
"MIT"
] | null | null | null |
tritimap/__init__.py
|
zwbao/Triti-Map
|
58d79d773df3862b6e03717bf3563d8c427c7027
|
[
"MIT"
] | null | null | null |
import os

# Public package version string.
__version__ = "0.9.2"

# Absolute directory of this package on disk (presumably used elsewhere to
# locate bundled resource files -- confirm against callers).
root_dir = os.path.dirname(os.path.abspath(__file__))
| 14.666667
| 53
| 0.727273
| 15
| 88
| 3.666667
| 0.8
| 0.218182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 0.113636
| 88
| 5
| 54
| 17.6
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.056818
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
854c44ca4ed3e6cbb53690f31ac48a7258c545d7
| 283
|
py
|
Python
|
Lista PythonBrasil/exer16.py
|
GlauberGoncalves/Python
|
cdea025a3b8a0304455e1f8561c1d13e00040d8e
|
[
"MIT"
] | 3
|
2017-01-29T00:39:31.000Z
|
2017-06-16T18:53:36.000Z
|
Lista PythonBrasil/exer16.py
|
GlauberGoncalves/Python
|
cdea025a3b8a0304455e1f8561c1d13e00040d8e
|
[
"MIT"
] | null | null | null |
Lista PythonBrasil/exer16.py
|
GlauberGoncalves/Python
|
cdea025a3b8a0304455e1f8561c1d13e00040d8e
|
[
"MIT"
] | null | null | null |
# Paint estimate: 1 litre covers 3 m²; cans hold 18 litres at R$ 80 each.
area = float(input('informe o tamanho em m² '))
litros = float(area / 3)
# Round the can count up unless the litres divide evenly into cans.
if litros % 18 == 0:
    latas = litros / 18
else:
    latas = litros // 18 + 1
print('voce precisara de %f latas ' % latas)
print('Preço: R$ %0.2f' % (latas * 80))
| 28.3
| 53
| 0.515901
| 51
| 283
| 2.862745
| 0.490196
| 0.10274
| 0.246575
| 0.273973
| 0.630137
| 0.630137
| 0.630137
| 0.39726
| 0
| 0
| 0
| 0.106481
| 0.236749
| 283
| 9
| 54
| 31.444444
| 0.569444
| 0
| 0
| 0
| 0
| 0
| 0.381625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
8561b65f73f039098088547f5dc3f1ca3cf952f5
| 145
|
py
|
Python
|
cloudberry-py/cloudberry/api/json_util.py
|
olliekrk/cloud-berry
|
8b39fb0b4f8772348fb50c0c1d0200c96df03cbe
|
[
"MIT"
] | null | null | null |
cloudberry-py/cloudberry/api/json_util.py
|
olliekrk/cloud-berry
|
8b39fb0b4f8772348fb50c0c1d0200c96df03cbe
|
[
"MIT"
] | null | null | null |
cloudberry-py/cloudberry/api/json_util.py
|
olliekrk/cloud-berry
|
8b39fb0b4f8772348fb50c0c1d0200c96df03cbe
|
[
"MIT"
] | null | null | null |
import json
class JSONUtil:
    """Helpers for building JSON request payloads."""

    @staticmethod
    def multipart_payload(payload):
        """Serialize *payload* for a multipart form field.

        Returns the (filename, body, content_type) triple -- no filename,
        the JSON-encoded body, and the JSON MIME type.
        """
        body = json.dumps(payload)
        return None, body, 'application/json'
| 18.125
| 60
| 0.710345
| 16
| 145
| 6.375
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 145
| 7
| 61
| 20.714286
| 0.87931
| 0
| 0
| 0
| 0
| 0
| 0.110345
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
85a7f592ed553404754f7344c9234b29657d2ed5
| 331
|
py
|
Python
|
Coding-Challenges/largestSubsequence/largest_subsequence.py
|
FergusDevelopmentLLC/Coders-Workshop
|
3513bd5f79eaa85b4d2a648c5f343a224842325d
|
[
"MIT"
] | 33
|
2019-12-02T23:29:47.000Z
|
2022-03-24T02:40:36.000Z
|
Coding-Challenges/largestSubsequence/largest_subsequence.py
|
FergusDevelopmentLLC/Coders-Workshop
|
3513bd5f79eaa85b4d2a648c5f343a224842325d
|
[
"MIT"
] | 39
|
2020-01-15T19:28:12.000Z
|
2021-11-26T05:13:29.000Z
|
Coding-Challenges/largestSubsequence/largest_subsequence.py
|
FergusDevelopmentLLC/Coders-Workshop
|
3513bd5f79eaa85b4d2a648c5f343a224842325d
|
[
"MIT"
] | 49
|
2019-12-02T23:29:53.000Z
|
2022-03-03T01:11:37.000Z
|
#!/usr/bin/env python3
def largest_subsequence(s1, s2):
    """Return a longest common subsequence (LCS) of ``s1`` and ``s2``.

    Classic O(len(s1) * len(s2)) dynamic programme: ``dp[i][j]`` holds the
    LCS length of ``s1[:i]`` and ``s2[:j]``; the subsequence itself is
    recovered by backtracking from ``dp[m][n]``. Returns '' when either
    string is empty or the strings share no characters.
    """
    m, n = len(s1), len(s2)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if s1[i - 1] == s2[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    # Backtrack to recover one LCS (built in reverse).
    out = []
    i, j = m, n
    while i > 0 and j > 0:
        if s1[i - 1] == s2[j - 1]:
            out.append(s1[i - 1])
            i -= 1
            j -= 1
        elif dp[i - 1][j] >= dp[i][j - 1]:
            i -= 1
        else:
            j -= 1
    return ''.join(reversed(out))

print(largest_subsequence("ABAZDC", "BACBAD")) # "ABAD"
print(largest_subsequence("AGGTAB", "GXTXAYB")) # "GTAB"
print(largest_subsequence("aaaa", "aa")) # "aa"
print(largest_subsequence("", "...")) # ""
print(largest_subsequence("ABBA", "ABCABA")) # "ABBA"
| 27.583333
| 57
| 0.667674
| 37
| 331
| 5.810811
| 0.567568
| 0.502326
| 0.534884
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010239
| 0.114804
| 331
| 11
| 58
| 30.090909
| 0.723549
| 0.151057
| 0
| 0
| 0
| 0
| 0.16
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0.142857
| 0
| 0
| 0.142857
| 0.714286
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
|
0
| 4
|
a4119e1240314b1bbd0685ee534363309959e324
| 476
|
py
|
Python
|
d3dshot/capture_outputs/pytorch_float_capture_output.py
|
Hualin-Peng/D3DShot
|
a506e1b3ab4effb49af5cc171fc87822a48b3ba9
|
[
"MIT"
] | 238
|
2019-04-04T16:15:45.000Z
|
2022-03-30T06:47:47.000Z
|
d3dshot/capture_outputs/pytorch_float_capture_output.py
|
Hualin-Peng/D3DShot
|
a506e1b3ab4effb49af5cc171fc87822a48b3ba9
|
[
"MIT"
] | 51
|
2019-05-02T13:22:53.000Z
|
2022-01-05T16:15:55.000Z
|
d3dshot/capture_outputs/pytorch_float_capture_output.py
|
Hualin-Peng/D3DShot
|
a506e1b3ab4effb49af5cc171fc87822a48b3ba9
|
[
"MIT"
] | 41
|
2019-04-04T20:41:19.000Z
|
2022-02-26T09:26:03.000Z
|
import numpy as np
from PIL import Image
from d3dshot.capture_outputs.pytorch_capture_output import PytorchCaptureOutput
class PytorchFloatCaptureOutput(PytorchCaptureOutput):
    """Capture output variant that rescales frames by 1/255 into float range.

    Assumes the parent class produces values in 0..255 -- TODO confirm
    against PytorchCaptureOutput.process.
    """

    def process(self, pointer, pitch, size, width, height, region, rotation):
        # Build the frame via the parent implementation, then normalise.
        image = super().process(pointer, pitch, size, width, height, region, rotation)
        return image / 255.0

    def to_pil(self, frame):
        # Undo the normalisation: rescale back to 0..255 uint8 for PIL.
        return Image.fromarray(np.array(frame * 255.0, dtype=np.uint8))
| 31.733333
| 86
| 0.737395
| 60
| 476
| 5.783333
| 0.583333
| 0.069164
| 0.092219
| 0.121037
| 0.236311
| 0.236311
| 0.236311
| 0
| 0
| 0
| 0
| 0.025316
| 0.170168
| 476
| 14
| 87
| 34
| 0.853165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.333333
| 0.111111
| 0.888889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 4
|
a434ec81bfc9e87e73d857293f1cc96d1049d22e
| 207
|
py
|
Python
|
debeauty/forms.py
|
gabriel-py/debeauty
|
fcb1b2afe72dd45ea1a6e673ce4eaf835f4f3030
|
[
"MIT"
] | null | null | null |
debeauty/forms.py
|
gabriel-py/debeauty
|
fcb1b2afe72dd45ea1a6e673ce4eaf835f4f3030
|
[
"MIT"
] | null | null | null |
debeauty/forms.py
|
gabriel-py/debeauty
|
fcb1b2afe72dd45ea1a6e673ce4eaf835f4f3030
|
[
"MIT"
] | null | null | null |
from django import forms
class NovoPedido(forms.Form):
    """Form for creating a new appointment request ("novo pedido").

    Captures the desired service date and an optional start/end time window.
    """
    data_realizacao_desejada = forms.DateField()  # desired date of service (required)
    horario_inicio = forms.TimeField(required=False)  # optional window start time
    horario_fim = forms.TimeField(required=False)  # optional window end time
| 34.5
| 52
| 0.782609
| 25
| 207
| 6.32
| 0.68
| 0.177215
| 0.278481
| 0.341772
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 207
| 6
| 53
| 34.5
| 0.877778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
a44f020b904b67da29fbc45f7db81180d181ac35
| 12,210
|
py
|
Python
|
aries_cloudagent/commands/tests/test_upgrade.py
|
kuraakhilesh8230/aries-cloudagent-python
|
ee384d1330f6a50ff45a507392ce54f92900f23a
|
[
"Apache-2.0"
] | 4
|
2019-07-01T13:12:50.000Z
|
2019-07-02T20:01:37.000Z
|
aries_cloudagent/commands/tests/test_upgrade.py
|
kuraakhilesh8230/aries-cloudagent-python
|
ee384d1330f6a50ff45a507392ce54f92900f23a
|
[
"Apache-2.0"
] | 29
|
2019-06-24T22:48:04.000Z
|
2019-07-02T20:52:16.000Z
|
aries_cloudagent/commands/tests/test_upgrade.py
|
kuraakhilesh8230/aries-cloudagent-python
|
ee384d1330f6a50ff45a507392ce54f92900f23a
|
[
"Apache-2.0"
] | 12
|
2019-06-24T22:17:44.000Z
|
2019-07-02T19:49:31.000Z
|
import asyncio
from asynctest import mock as async_mock, TestCase as AsyncTestCase
from ...core.in_memory import InMemoryProfile
from ...config.error import ArgsParseError
from ...connections.models.conn_record import ConnRecord
from ...storage.base import BaseStorage
from ...storage.record import StorageRecord
from ...version import __version__
from .. import upgrade as test_module
from ..upgrade import UpgradeError
class TestUpgrade(AsyncTestCase):
    """Tests for the ``upgrade`` command module.

    Fix applied: ``test_main`` previously ended with the bare attribute
    access ``mock_execute.assert_called_once`` (no parentheses), which is a
    no-op and asserted nothing; it now actually calls the assertion.
    """

    async def setUp(self):
        """Create in-memory profiles and seed an ``acapy_version`` record."""
        self.session = InMemoryProfile.test_session()
        self.profile = self.session.profile
        self.session_storage = InMemoryProfile.test_session()
        self.profile_storage = self.session_storage.profile
        self.storage = self.session_storage.inject(BaseStorage)
        record = StorageRecord(
            "acapy_version",
            "v0.7.2",
        )
        await self.storage.add_record(record)

    def test_bad_calls(self):
        """Unknown CLI arguments cause an argparse SystemExit."""
        with self.assertRaises(SystemExit):
            test_module.execute(["bad"])

    async def test_upgrade_storage_from_version_included(self):
        """Upgrade succeeds with both config path and from_version supplied."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile_storage,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ), async_mock.patch.object(
            ConnRecord,
            "query",
            async_mock.CoroutineMock(return_value=[ConnRecord()]),
        ), async_mock.patch.object(
            ConnRecord, "save", async_mock.CoroutineMock()
        ):
            await test_module.upgrade(
                {
                    "upgrade.config_path": "./aries_cloudagent/commands/default_version_upgrade_config.yml",
                    "upgrade.from_version": "v0.7.2",
                }
            )

    async def test_upgrade_storage_missing_from_version(self):
        """Upgrade falls back to the stored version when from_version is absent."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile_storage,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ), async_mock.patch.object(
            ConnRecord,
            "query",
            async_mock.CoroutineMock(return_value=[ConnRecord()]),
        ), async_mock.patch.object(
            ConnRecord, "save", async_mock.CoroutineMock()
        ):
            await test_module.upgrade({})

    async def test_upgrade_from_version(self):
        """Upgrade succeeds with only from_version supplied."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ), async_mock.patch.object(
            ConnRecord,
            "query",
            async_mock.CoroutineMock(return_value=[ConnRecord()]),
        ), async_mock.patch.object(
            ConnRecord, "save", async_mock.CoroutineMock()
        ):
            await test_module.upgrade(
                {
                    "upgrade.from_version": "v0.7.2",
                }
            )

    async def test_upgrade_callable(self):
        """Upgrade runs when the config names an update_existing_records step."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ), async_mock.patch.object(
            test_module.yaml,
            "safe_load",
            async_mock.MagicMock(
                return_value={
                    "v0.7.2": {
                        "resave_records": {
                            "base_record_path": [
                                "aries_cloudagent.connections.models.conn_record.ConnRecord"
                            ]
                        },
                        "update_existing_records": True,
                    },
                }
            ),
        ):
            await test_module.upgrade(
                {
                    "upgrade.from_version": "v0.7.2",
                }
            )

    async def test_upgrade_x_same_version(self):
        """Upgrading to the currently stored version raises UpgradeError."""
        version_storage_record = await self.storage.find_record(
            type_filter="acapy_version", tag_query={}
        )
        await self.storage.update_record(version_storage_record, f"v{__version__}", {})
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile_storage,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ):
            with self.assertRaises(UpgradeError):
                await test_module.upgrade(
                    {
                        "upgrade.config_path": "./aries_cloudagent/commands/default_version_upgrade_config.yml",
                    }
                )

    async def test_upgrade_missing_from_version(self):
        """Upgrade succeeds with only the config path supplied."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ), async_mock.patch.object(
            ConnRecord,
            "query",
            async_mock.CoroutineMock(return_value=[ConnRecord()]),
        ), async_mock.patch.object(
            ConnRecord, "save", async_mock.CoroutineMock()
        ):
            await test_module.upgrade(
                {
                    "upgrade.config_path": "./aries_cloudagent/commands/default_version_upgrade_config.yml",
                }
            )

    async def test_upgrade_x_callable_not_set(self):
        """A version config lacking its update function raises UpgradeError."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ), async_mock.patch.object(
            test_module.yaml,
            "safe_load",
            async_mock.MagicMock(
                return_value={
                    "v0.7.2": {
                        "resave_records": {
                            "base_record_path": [
                                "aries_cloudagent.connections.models.conn_record.ConnRecord"
                            ]
                        },
                        "update_existing_records": True,
                    },
                    "v0.6.0": {"update_existing_records": True},
                }
            ),
        ):
            with self.assertRaises(UpgradeError) as ctx:
                await test_module.upgrade(
                    {
                        "upgrade.from_version": "v0.6.0",
                    }
                )
            assert "No update_existing_records function specified" in str(ctx.exception)

    async def test_upgrade_x_class_not_found(self):
        """An unresolvable record class path raises UpgradeError."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ), async_mock.patch.object(
            test_module.yaml,
            "safe_load",
            async_mock.MagicMock(
                return_value={
                    "v0.7.2": {
                        "resave_records": {
                            "base_record_path": [
                                "aries_cloudagent.connections.models.conn_record.Invalid"
                            ],
                        }
                    },
                }
            ),
        ):
            with self.assertRaises(UpgradeError) as ctx:
                await test_module.upgrade(
                    {
                        "upgrade.from_version": "v0.7.2",
                    }
                )
            assert "Unknown Record type" in str(ctx.exception)

    async def test_execute(self):
        """execute() wires CLI args through to the event loop runner."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ), async_mock.patch.object(
            ConnRecord,
            "query",
            async_mock.CoroutineMock(return_value=[ConnRecord()]),
        ), async_mock.patch.object(
            ConnRecord, "save", async_mock.CoroutineMock()
        ), async_mock.patch.object(
            asyncio, "get_event_loop", async_mock.MagicMock()
        ) as mock_get_event_loop:
            mock_get_event_loop.return_value = async_mock.MagicMock(
                run_until_complete=async_mock.MagicMock(),
            )
            test_module.execute(
                [
                    "--upgrade-config",
                    "./aries_cloudagent/config/tests/test-acapy-upgrade-config.yaml",
                    "--from-version",
                    "v0.7.2",
                ]
            )

    async def test_upgrade_x_invalid_record_type(self):
        """Resaving a non-BaseRecord type raises UpgradeError."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ), async_mock.patch.object(
            test_module.yaml,
            "safe_load",
            async_mock.MagicMock(
                return_value={
                    "v0.7.2": {
                        "resave_records": {
                            "base_exch_record_path": [
                                "aries_cloudagent.connections.models.connection_target.ConnectionTarget"
                            ],
                        }
                    }
                }
            ),
        ):
            with self.assertRaises(UpgradeError) as ctx:
                await test_module.upgrade(
                    {
                        "upgrade.from_version": "v0.7.2",
                    }
                )
            assert "Only BaseRecord can be resaved" in str(ctx.exception)

    async def test_upgrade_x_invalid_config(self):
        """An empty upgrade config raises UpgradeError before wallet setup."""
        with async_mock.patch.object(
            test_module.yaml,
            "safe_load",
            async_mock.MagicMock(return_value={}),
        ):
            with self.assertRaises(UpgradeError) as ctx:
                await test_module.upgrade({})
            assert "No version configs found in" in str(ctx.exception)

    async def test_upgrade_x_from_version_not_in_config(self):
        """A from_version absent from the config raises UpgradeError."""
        with async_mock.patch.object(
            test_module,
            "wallet_config",
            async_mock.CoroutineMock(
                return_value=(
                    self.profile,
                    async_mock.CoroutineMock(did="public DID", verkey="verkey"),
                )
            ),
        ):
            with self.assertRaises(UpgradeError) as ctx:
                await test_module.upgrade(
                    {
                        "upgrade.from_version": "v1.2.3",
                    }
                )
            assert "No upgrade configuration found for" in str(ctx.exception)

    def test_main(self):
        """Running as __main__ invokes execute exactly once."""
        with async_mock.patch.object(
            test_module, "__name__", "__main__"
        ) as mock_name, async_mock.patch.object(
            test_module, "execute", async_mock.MagicMock()
        ) as mock_execute:
            test_module.main()
            # BUG FIX: was the bare attribute ``mock_execute.assert_called_once``
            # (no call), which never asserted anything.
            mock_execute.assert_called_once()
| 35.289017
| 112
| 0.498198
| 1,048
| 12,210
| 5.51813
| 0.120229
| 0.110496
| 0.121736
| 0.100294
| 0.750303
| 0.723327
| 0.708456
| 0.703441
| 0.69687
| 0.674218
| 0
| 0.00584
| 0.410975
| 12,210
| 345
| 113
| 35.391304
| 0.798248
| 0
| 0
| 0.618462
| 0
| 0
| 0.134152
| 0.049304
| 0
| 0
| 0
| 0
| 0.04
| 1
| 0.006154
| false
| 0
| 0.030769
| 0
| 0.04
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
a4826abb269c9b1e496e442524fa5f81ca95d1ce
| 281
|
py
|
Python
|
src/rubrix/server/tasks/api.py
|
drahnreb/rubrix
|
340e545baf4d65a0d94e3c671ad6c93ff1d59700
|
[
"Apache-2.0"
] | null | null | null |
src/rubrix/server/tasks/api.py
|
drahnreb/rubrix
|
340e545baf4d65a0d94e3c671ad6c93ff1d59700
|
[
"Apache-2.0"
] | null | null | null |
src/rubrix/server/tasks/api.py
|
drahnreb/rubrix
|
340e545baf4d65a0d94e3c671ad6c93ff1d59700
|
[
"Apache-2.0"
] | null | null | null |
from fastapi import APIRouter
from .text_classification import api as text_classification
from .token_classification import api as token_classification
# Top-level router that aggregates every task-specific API.
router = APIRouter()

# Mount each task's sub-router onto the shared router.
for sub_api in (text_classification, token_classification):
    router.include_router(sub_api.router)
| 25.545455
| 61
| 0.836299
| 36
| 281
| 6.277778
| 0.388889
| 0.238938
| 0.20354
| 0.221239
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117438
| 281
| 10
| 62
| 28.1
| 0.91129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
a49c560366637a4b9c308293ac6bef9e243f0019
| 736
|
py
|
Python
|
tabular/tests/unittests/models/test_linear.py
|
zhiqiangdon/autogluon
|
71ee7ef0f05d8f0aad112d8c1719174aa33194d9
|
[
"Apache-2.0"
] | 4,462
|
2019-12-09T17:41:07.000Z
|
2022-03-31T22:00:41.000Z
|
tabular/tests/unittests/models/test_linear.py
|
zhiqiangdon/autogluon
|
71ee7ef0f05d8f0aad112d8c1719174aa33194d9
|
[
"Apache-2.0"
] | 1,408
|
2019-12-09T17:48:59.000Z
|
2022-03-31T20:24:12.000Z
|
tabular/tests/unittests/models/test_linear.py
|
zhiqiangdon/autogluon
|
71ee7ef0f05d8f0aad112d8c1719174aa33194d9
|
[
"Apache-2.0"
] | 623
|
2019-12-10T02:04:18.000Z
|
2022-03-20T17:11:01.000Z
|
from autogluon.tabular.models.lr.lr_model import LinearModel
def _fit_and_validate_linear(fit_helper, dataset_name):
    """Fit a LinearModel with default hyperparameters on *dataset_name*
    and validate the resulting predictor via the shared fit_helper fixture."""
    fit_args = dict(
        hyperparameters={LinearModel: {}},
    )
    fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args)


def test_linear_binary(fit_helper):
    """LinearModel on a binary classification dataset."""
    _fit_and_validate_linear(fit_helper, 'adult')


def test_linear_multiclass(fit_helper):
    """LinearModel on a multiclass classification dataset."""
    _fit_and_validate_linear(fit_helper, 'covertype')


def test_linear_regression(fit_helper):
    """LinearModel on a regression dataset."""
    _fit_and_validate_linear(fit_helper, 'ames')
| 27.259259
| 85
| 0.73913
| 93
| 736
| 5.419355
| 0.27957
| 0.125
| 0.142857
| 0.095238
| 0.78373
| 0.78373
| 0.78373
| 0.78373
| 0.78373
| 0.444444
| 0
| 0
| 0.165761
| 736
| 26
| 86
| 28.307692
| 0.820847
| 0
| 0
| 0.473684
| 0
| 0
| 0.02449
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0.052632
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8efe2c4eaba4d7937fbcd9cb1f00f290f7aabadc
| 99
|
py
|
Python
|
08-List-Comprehensions/02-List-Comprehension-with-If-Conditional/main.py
|
0x00000024/learn-python
|
97057dc427feaf8e6da5ca373e7e02d4a1b949ae
|
[
"MIT"
] | null | null | null |
08-List-Comprehensions/02-List-Comprehension-with-If-Conditional/main.py
|
0x00000024/learn-python
|
97057dc427feaf8e6da5ca373e7e02d4a1b949ae
|
[
"MIT"
] | null | null | null |
08-List-Comprehensions/02-List-Comprehension-with-If-Conditional/main.py
|
0x00000024/learn-python
|
97057dc427feaf8e6da5ca373e7e02d4a1b949ae
|
[
"MIT"
] | null | null | null |
# Raw temperature readings; -9999 is the sentinel for a missing sample.
temps = [221, 233, 132, -9999, 434]

# Keep only the valid readings, preserving order.
new_temps = [reading for reading in temps if reading != -9999]
print(new_temps)
| 19.8
| 44
| 0.636364
| 19
| 99
| 3.210526
| 0.631579
| 0.262295
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.253165
| 0.20202
| 99
| 5
| 45
| 19.8
| 0.518987
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f10f8aba420899a3f319690d32052dd346a3cc8f
| 472
|
py
|
Python
|
ai_controller/controller.py
|
mingsumsze1/mcts
|
e67b80eb138d122a75e12b7d1886edb84de0ede5
|
[
"MIT"
] | null | null | null |
ai_controller/controller.py
|
mingsumsze1/mcts
|
e67b80eb138d122a75e12b7d1886edb84de0ede5
|
[
"MIT"
] | null | null | null |
ai_controller/controller.py
|
mingsumsze1/mcts
|
e67b80eb138d122a75e12b7d1886edb84de0ede5
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from environment.controller import Controller
from environment.game_state import GameState
class AIController(Controller, ABC):
    """
    Abstract base for AI player controllers.

    Concrete subclasses implement ``pick_move_with_likelihood``; plain move
    selection is delegated to it.
    """

    def pick_move(self, state: GameState):
        """Pick a move for *state* by delegating to the likelihood picker."""
        choice = self.pick_move_with_likelihood(state)
        return choice[0]

    @abstractmethod
    def pick_move_with_likelihood(self, state: GameState):
        """
        Pick a random move to play and return it together with its likelihood.
        """
        raise NotImplementedError
| 23.6
| 57
| 0.75
| 57
| 472
| 6.070175
| 0.491228
| 0.069364
| 0.063584
| 0.127168
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002571
| 0.175847
| 472
| 19
| 58
| 24.842105
| 0.886889
| 0.146186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.333333
| 0.111111
| 0.777778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 4
|
f123b44307466fc8929799150b7a0789336de9af
| 2,271
|
py
|
Python
|
data/transcoder_evaluation_gfg/python/REMOVE_MINIMUM_NUMBER_ELEMENTS_NO_COMMON_ELEMENT_EXIST_ARRAY.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 241
|
2021-07-20T08:35:20.000Z
|
2022-03-31T02:39:08.000Z
|
data/transcoder_evaluation_gfg/python/REMOVE_MINIMUM_NUMBER_ELEMENTS_NO_COMMON_ELEMENT_EXIST_ARRAY.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 49
|
2021-07-22T23:18:42.000Z
|
2022-03-24T09:15:26.000Z
|
data/transcoder_evaluation_gfg/python/REMOVE_MINIMUM_NUMBER_ELEMENTS_NO_COMMON_ELEMENT_EXIST_ARRAY.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 71
|
2021-07-21T05:17:52.000Z
|
2022-03-29T23:49:28.000Z
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold(a, b, n, m):
    """Minimum number of removals so the first n elements of ``a`` and the
    first m elements of ``b`` share no common element.

    For every value present in both prefixes, the side with fewer
    occurrences must be removed, so the answer is the sum of
    min(countA[v], countB[v]) over all common values v.
    """
    count_a = dict()
    count_b = dict()
    for i in range(n):
        count_a[a[i]] = count_a.get(a[i], 0) + 1
    # BUG FIX: the original loop iterated range(n) here, ignoring the ``m``
    # parameter entirely and mis-counting b's prefix whenever n != m.
    for i in range(m):
        count_b[b[i]] = count_b.get(b[i], 0) + 1
    res = 0
    for x in count_a:
        if x in count_b.keys():
            res += min(count_a[x], count_b[x])
    return res
#TOFILL
if __name__ == '__main__':
    # Fixture inputs: each tuple is (a, b, n, m) for one comparison run.
    param = [
    ([4, 7, 10, 12, 12, 24, 29, 38, 45, 51, 53, 54, 59, 68, 72, 73, 85, 86, 88, 92, 92, 95],[7, 9, 17, 23, 25, 26, 29, 32, 35, 56, 56, 58, 59, 59, 62, 63, 72, 82, 85, 86, 95, 97],15,13,),
    ([-6, 48, -70, 14, -86, 56, 80, -64, 64, -88, -14, 78, 14, -18, 52, 2, 22, 88],[-62, -58, 60, -30, 42, 8, 66, -48, -18, 64, -76, -90, -48, -90, -24, 64, -88, -98],15,9,),
    ([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],10,10,),
    ([10, 93, 2, 16, 36, 49, 36, 86, 6, 99, 95, 2],[99, 28, 7, 21, 62, 89, 82, 41, 43, 77, 8, 14],6,10,),
    ([-98, -96, -80, -64, -42, -30, -6, 10, 62, 66, 82],[-62, -50, -42, 24, 44, 46, 52, 54, 60, 72, 72],9,6,),
    ([1, 1, 0, 1, 1],[1, 1, 1, 0, 0],4,2,),
    ([7, 11, 13, 15, 21, 33, 36, 39, 66, 99],[23, 36, 42, 44, 62, 65, 70, 78, 82, 89],9,9,),
    ([-40],[-98],0,0,),
    ([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],31,26,),
    ([79, 91, 31, 16, 28, 45, 37, 43, 73, 73, 76, 28, 71, 60, 64, 60, 99, 36, 47, 38, 65, 34, 22, 94, 84, 51, 72, 45, 71, 2],[58, 94, 12, 27, 98, 38, 75, 20, 94, 43, 32, 90, 23, 41, 88, 2, 62, 96, 53, 57, 48, 79, 6, 16, 11, 46, 73, 57, 67, 7],18,18,)
]
    # Count how many fixtures the candidate implementation (f_filled, which is
    # expected to be injected at the #TOFILL marker above — not defined here)
    # matches against the reference f_gold.
    n_success = 0
    for i, parameters_set in enumerate(param):
        if f_filled(*parameters_set) == f_gold(*parameters_set):
            n_success+=1
    # Report: successes vs total fixture count.
    print("#Results: %i, %i" % (n_success, len(param)))
| 56.775
| 303
| 0.453545
| 493
| 2,271
| 2.054767
| 0.296146
| 0.138203
| 0.192498
| 0.240869
| 0.16387
| 0.140178
| 0.140178
| 0.140178
| 0.13228
| 0.13228
| 0
| 0.360646
| 0.290621
| 2,271
| 40
| 304
| 56.775
| 0.268156
| 0.081462
| 0
| 0.066667
| 0
| 0
| 0.011544
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0
| 0
| 0.066667
| 0.033333
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f13e7b6e0e1bf6512e5b87efce7ea8384bfd3c4d
| 587
|
py
|
Python
|
dtt/kubectl/pod.py
|
ymizushi/dtt
|
a7be7466bcda9644594394ab8c16e794f514b15c
|
[
"MIT"
] | 3
|
2019-09-28T02:01:17.000Z
|
2020-05-23T06:27:57.000Z
|
dtt/kubectl/pod.py
|
ymizushi/dtt
|
a7be7466bcda9644594394ab8c16e794f514b15c
|
[
"MIT"
] | 11
|
2019-07-26T12:30:47.000Z
|
2019-08-06T13:45:10.000Z
|
dtt/kubectl/pod.py
|
ymizushi/dtt
|
a7be7466bcda9644594394ab8c16e794f514b15c
|
[
"MIT"
] | null | null | null |
class Pods:
    """Cursor-style wrapper over a kubectl pod listing.

    Holds the pod items plus the listing metadata, and tracks a current
    selection index that can be moved while staying within bounds.
    """

    def __init__(self, pods):
        # ``pods`` is expected to expose ``.items`` and ``.metadata``
        # (the shape returned by the Kubernetes list API).
        self._index = 0
        self._pods = pods.items
        self._metadata = pods.metadata

    @property
    def index(self):
        """Currently selected position."""
        return self._index

    @property
    def current_pod(self):
        """Pod at the current position."""
        return self._pods[self._index]

    @property
    def list(self):
        """All pod items."""
        return self._pods

    def set_index(self, index):
        """Jump directly to *index* (no bounds check, as in callers' contract)."""
        self._index = index

    def add_index(self):
        """Advance the selection by one, stopping at the last pod."""
        forward = self._index + 1
        if forward < len(self._pods):
            self._index = forward

    def sub_index(self):
        """Move the selection back by one, stopping at the first pod."""
        backward = self._index - 1
        if backward >= 0:
            self._index = backward
| 24.458333
| 45
| 0.565588
| 75
| 587
| 4.146667
| 0.266667
| 0.26045
| 0.128617
| 0.163987
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015267
| 0.330494
| 587
| 23
| 46
| 25.521739
| 0.776081
| 0
| 0
| 0.136364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.318182
| false
| 0
| 0
| 0.136364
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
74adb9d1b6713668874e8ffff93d9e51a052d41f
| 114
|
py
|
Python
|
main.py
|
panzihan/comic-spider
|
2566ed8f725c11fa3f87d7b53da479a530a1bc75
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
panzihan/comic-spider
|
2566ed8f725c11fa3f87d7b53da479a530a1bc75
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
panzihan/comic-spider
|
2566ed8f725c11fa3f87d7b53da479a530a1bc75
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from scrapy import cmdline

# Entry point: launch the "dmzj" spider programmatically, equivalent to
# running `scrapy crawl dmzj` from the shell.
cmdline.execute("scrapy crawl dmzj".split())
| 22.8
| 44
| 0.692982
| 16
| 114
| 4.9375
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009901
| 0.114035
| 114
| 5
| 44
| 22.8
| 0.772277
| 0.333333
| 0
| 0
| 0
| 0
| 0.226667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
74b7830f4dda15ada9843cfeaccea42f032c1f19
| 1,408
|
py
|
Python
|
E9/contact_manager/contact/forms.py
|
wendy006/Web-Dev-Course
|
2f0cfddb7ab4db88ffb4483c7cd4a00abf36c720
|
[
"MIT"
] | null | null | null |
E9/contact_manager/contact/forms.py
|
wendy006/Web-Dev-Course
|
2f0cfddb7ab4db88ffb4483c7cd4a00abf36c720
|
[
"MIT"
] | null | null | null |
E9/contact_manager/contact/forms.py
|
wendy006/Web-Dev-Course
|
2f0cfddb7ab4db88ffb4483c7cd4a00abf36c720
|
[
"MIT"
] | null | null | null |
from django import forms
# class RenewBookForm(forms.Form):
# renewal_date = forms.DateField(help_text="Enter a date between now and 4 weeks (default 3).")
# class NameForm(forms.Form):
# your_name = forms.CharField(label='Your name', max_length=100)
# class UserForm(forms.Form):
# username = forms.CharField(label="用户名", max_length=128)
# password = forms.CharField(label="密码", max_length=256, widget=forms.PasswordInput)
class edit_person_form(forms.Form):
    """Form for editing an existing contact's details.

    All fields are required free-text inputs capped at 200 characters.
    (Lowercase class name kept for backward compatibility with callers;
    dead commented-out ModelForm ``Meta`` scaffolding removed.)
    """

    first_name = forms.CharField(label="first_name", max_length=200, required=True)
    last_name = forms.CharField(label="last_name", max_length=200, required=True)
    email = forms.CharField(label="email", max_length=200, required=True)
    phone_number = forms.CharField(label="phone number", max_length=200, required=True)
    notes = forms.CharField(label="notes", max_length=200, required=True)
class add_person_form(forms.Form):
    """Form for adding a new contact; fields mirror edit_person_form.

    All fields are required free-text inputs capped at 200 characters.
    (Lowercase class name kept for backward compatibility with callers.)
    """

    first_name = forms.CharField(label="first_name", max_length=200, required=True)
    last_name = forms.CharField(label="last_name", max_length=200, required=True)
    # BUG FIX: label was " email" with a stray leading space, inconsistent
    # with the matching field in edit_person_form.
    email = forms.CharField(label="email", max_length=200, required=True)
    phone_number = forms.CharField(label="phone number", max_length=200, required=True)
    notes = forms.CharField(label="notes", max_length=200, required=True)
| 36.102564
| 99
| 0.715199
| 190
| 1,408
| 5.142105
| 0.284211
| 0.186285
| 0.252815
| 0.204708
| 0.595701
| 0.595701
| 0.595701
| 0.595701
| 0.595701
| 0.595701
| 0
| 0.034396
| 0.153409
| 1,408
| 38
| 100
| 37.052632
| 0.785235
| 0.314631
| 0
| 0.769231
| 0
| 0
| 0.088328
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
74bf45dcb4a434eeeff395649e73785dadc6d3a6
| 97
|
py
|
Python
|
tests/_support/configs/yaml/tasks.py
|
tyewang/invoke
|
e40b3ef3b8e9a9b275b2964c65e2ce878fec4349
|
[
"BSD-2-Clause"
] | null | null | null |
tests/_support/configs/yaml/tasks.py
|
tyewang/invoke
|
e40b3ef3b8e9a9b275b2964c65e2ce878fec4349
|
[
"BSD-2-Clause"
] | null | null | null |
tests/_support/configs/yaml/tasks.py
|
tyewang/invoke
|
e40b3ef3b8e9a9b275b2964c65e2ce878fec4349
|
[
"BSD-2-Clause"
] | null | null | null |
from spec import eq_
from invoke import ctask
@ctask
def mytask(c):
    """Fixture task: asserts the YAML config value ``hooray: yaml`` is
    visible on the invoke context ``c``."""
    eq_(c.hooray, 'yaml')
| 10.777778
| 25
| 0.690722
| 16
| 97
| 4.0625
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206186
| 97
| 8
| 26
| 12.125
| 0.844156
| 0
| 0
| 0
| 0
| 0
| 0.041237
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.