hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4e4723d25186488a87778c2992f1d94ea29f09f2
| 1,426
|
py
|
Python
|
log/tests/test_helpers.py
|
jacebrowning/minilog
|
fbe30aaac1c1c540dff11059445c76f414ef49d5
|
[
"MIT"
] | 16
|
2018-03-02T20:33:16.000Z
|
2021-03-23T18:39:53.000Z
|
log/tests/test_helpers.py
|
jacebrowning/minilog
|
fbe30aaac1c1c540dff11059445c76f414ef49d5
|
[
"MIT"
] | 8
|
2018-03-03T01:10:13.000Z
|
2022-03-07T22:46:51.000Z
|
log/tests/test_helpers.py
|
jacebrowning/minilog
|
fbe30aaac1c1c540dff11059445c76f414ef49d5
|
[
"MIT"
] | 5
|
2019-01-18T09:40:04.000Z
|
2021-06-03T21:16:17.000Z
|
# pylint: disable=unused-variable,expression-not-assigned
from unittest.mock import call, patch
from log import helpers
def describe_init():
@patch('logging.basicConfig')
def with_verbosity_0(config, expect):
helpers.init(format='%(message)s', verbosity=0)
expect(config.mock_calls) == [call(format='%(message)s', level=40)]
@patch('logging.basicConfig')
def with_verbosity_1(config, expect):
helpers.init(format='%(message)s', verbosity=1)
expect(config.mock_calls) == [call(format='%(message)s', level=30)]
@patch('logging.basicConfig')
def with_verbosity_2(config, expect):
helpers.init(format='%(message)s', verbosity=2)
expect(config.mock_calls) == [call(format='%(message)s', level=20)]
@patch('logging.basicConfig')
def with_verbosity_3(config, expect):
helpers.init(format='%(message)s', verbosity=3)
expect(config.mock_calls) == [call(format='%(message)s', level=10)]
@patch('logging.basicConfig')
def with_verbosity_above_3(config, expect):
helpers.init(format='%(message)s', verbosity=4)
expect(config.mock_calls) == [call(format='%(message)s', level=10)]
@patch('logging.basicConfig')
def with_verbosity_0_and_debug(config, expect):
helpers.init(format='%(message)s', verbosity=0, debug=True)
expect(config.mock_calls) == [call(format='%(message)s', level=10)]
| 37.526316
| 75
| 0.670407
| 181
| 1,426
| 5.160221
| 0.232044
| 0.167024
| 0.179872
| 0.167024
| 0.841542
| 0.841542
| 0.716274
| 0.672377
| 0.573876
| 0.231263
| 0
| 0.020033
| 0.159888
| 1,426
| 37
| 76
| 38.540541
| 0.759599
| 0.038569
| 0
| 0.333333
| 0
| 0
| 0.179693
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.259259
| false
| 0
| 0.074074
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4e4e266e258276167c969ecb8e195097fc5d3aed
| 1,223
|
py
|
Python
|
PyObjCTest/test_nstextfield.py
|
Khan/pyobjc-framework-Cocoa
|
f8b015ea2a72d8d78be6084fb12925c4785b8f1f
|
[
"MIT"
] | 132
|
2015-01-01T10:02:42.000Z
|
2022-03-09T12:51:01.000Z
|
mac/pyobjc-framework-Cocoa/PyObjCTest/test_nstextfield.py
|
mba811/music-player
|
7998986b34cfda2244ef622adefb839331b81a81
|
[
"BSD-2-Clause"
] | 6
|
2015-01-06T08:23:19.000Z
|
2019-03-14T12:22:06.000Z
|
mac/pyobjc-framework-Cocoa/PyObjCTest/test_nstextfield.py
|
mba811/music-player
|
7998986b34cfda2244ef622adefb839331b81a81
|
[
"BSD-2-Clause"
] | 27
|
2015-02-23T11:51:43.000Z
|
2022-03-07T02:34:18.000Z
|
from AppKit import *
from PyObjCTools.TestSupport import *
class TestNSTextField (TestCase):
def testMethods(self):
self.assertResultIsBOOL(NSTextField.drawsBackground)
self.assertArgIsBOOL(NSTextField.setDrawsBackground_, 0)
self.assertResultIsBOOL(NSTextField.isBordered)
self.assertArgIsBOOL(NSTextField.setBordered_, 0)
self.assertResultIsBOOL(NSTextField.isBezeled)
self.assertArgIsBOOL(NSTextField.setBezeled_, 0)
self.assertResultIsBOOL(NSTextField.isEditable)
self.assertArgIsBOOL(NSTextField.setEditable_, 0)
self.assertResultIsBOOL(NSTextField.isSelectable)
self.assertArgIsBOOL(NSTextField.setSelectable_, 0)
self.assertResultIsBOOL(NSTextField.textShouldBeginEditing_)
self.assertResultIsBOOL(NSTextField.textShouldEndEditing_)
self.assertResultIsBOOL(NSTextField.acceptsFirstResponder)
self.assertResultIsBOOL(NSTextField.allowsEditingTextAttributes)
self.assertArgIsBOOL(NSTextField.setAllowsEditingTextAttributes_, 0)
self.assertResultIsBOOL(NSTextField.importsGraphics)
self.assertArgIsBOOL(NSTextField.setImportsGraphics_, 0)
if __name__ == "__main__":
main()
| 47.038462
| 76
| 0.771872
| 92
| 1,223
| 10.076087
| 0.380435
| 0.237325
| 0.355987
| 0.220065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006763
| 0.15372
| 1,223
| 25
| 77
| 48.92
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0.006541
| 0
| 0
| 0
| 0
| 0
| 0.73913
| 1
| 0.043478
| false
| 0
| 0.173913
| 0
| 0.26087
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4e67100ed5697a70f7124f119975f6b57f6f207a
| 248
|
py
|
Python
|
POP1/worksheets/recursion-ii/ex01/test_ex01.py
|
silvafj/BBK-MSCCS-2017-18
|
d97b0f8e7434d19a1a4006989c32c4c1deb93842
|
[
"MIT"
] | 1
|
2021-12-29T19:38:56.000Z
|
2021-12-29T19:38:56.000Z
|
POP1/worksheets/recursion-ii/ex01/test_ex01.py
|
silvafj/BBK-MSCCS-2017-18
|
d97b0f8e7434d19a1a4006989c32c4c1deb93842
|
[
"MIT"
] | null | null | null |
POP1/worksheets/recursion-ii/ex01/test_ex01.py
|
silvafj/BBK-MSCCS-2017-18
|
d97b0f8e7434d19a1a4006989c32c4c1deb93842
|
[
"MIT"
] | 2
|
2021-04-08T22:58:03.000Z
|
2021-04-09T01:16:51.000Z
|
from triangle import triangle
def test_one():
assert triangle(1) == [[1]]
def test_two():
assert triangle(2) == [[1], [1, 1]]
def test_six():
assert triangle(6) == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1], [1, 5, 10, 10, 5, 1]]
| 22.545455
| 100
| 0.528226
| 47
| 248
| 2.723404
| 0.340426
| 0.140625
| 0.070313
| 0.140625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151515
| 0.201613
| 248
| 10
| 101
| 24.8
| 0.494949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0.428571
| true
| 0
| 0.142857
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
4e6739706d0b96c1b2605efb1704e19b39eb6662
| 124
|
py
|
Python
|
deeppipeline/segmentation/losses/__init__.py
|
lext/deep-pipeline
|
d16039064649b4b72b7c09ac826e578b256bc33a
|
[
"MIT"
] | 5
|
2019-07-11T17:43:10.000Z
|
2019-09-30T23:47:14.000Z
|
deeppipeline/segmentation/losses/__init__.py
|
lext/deep-pipeline
|
d16039064649b4b72b7c09ac826e578b256bc33a
|
[
"MIT"
] | 1
|
2019-05-16T09:08:55.000Z
|
2019-05-18T08:16:12.000Z
|
deeppipeline/segmentation/losses/__init__.py
|
lext/deep-pipeline
|
d16039064649b4b72b7c09ac826e578b256bc33a
|
[
"MIT"
] | null | null | null |
from ._functions import init_binary_loss
from ._losses import BCEWithLogitsLoss2d, SoftJaccardLoss, FocalLoss, CombinedLoss
| 41.333333
| 82
| 0.870968
| 13
| 124
| 8
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00885
| 0.08871
| 124
| 2
| 83
| 62
| 0.911504
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4ea8057a9171f9cfc499c8bccd1515871515ec37
| 164
|
py
|
Python
|
pdfconduit/conduit/__init__.py
|
mrstephenneal/pdfwatermarker
|
55934803efd91b6b456985be7df93c03d24747c7
|
[
"Apache-2.0"
] | 9
|
2018-08-28T14:08:19.000Z
|
2019-08-22T07:33:14.000Z
|
pdfconduit/conduit/__init__.py
|
mrstephenneal/pdfwatermarker
|
55934803efd91b6b456985be7df93c03d24747c7
|
[
"Apache-2.0"
] | 15
|
2018-08-28T14:08:17.000Z
|
2019-07-08T01:29:34.000Z
|
pdfconduit/conduit/__init__.py
|
mrstephenneal/pdfwatermarker
|
55934803efd91b6b456985be7df93c03d24747c7
|
[
"Apache-2.0"
] | 1
|
2020-08-10T00:14:43.000Z
|
2020-08-10T00:14:43.000Z
|
from pdfconduit.conduit.encrypt import Encrypt
from pdfconduit.conduit.watermark import WatermarkAdd, Watermark
__all__ = ["Encrypt", "Watermark", "WatermarkAdd"]
| 32.8
| 64
| 0.810976
| 17
| 164
| 7.588235
| 0.470588
| 0.217054
| 0.325581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091463
| 164
| 4
| 65
| 41
| 0.865772
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
14e2ac86035d495e7de4c4427f6ed3b4740af583
| 150
|
py
|
Python
|
Algorithms/sort/__init__.py
|
TigranGit/CodeBase
|
d58e30b1d83fab4b388ec2cdcb868fa751c62188
|
[
"Apache-2.0"
] | 1
|
2020-08-13T19:09:27.000Z
|
2020-08-13T19:09:27.000Z
|
Algorithms/sort/__init__.py
|
TigranGit/CodeBase
|
d58e30b1d83fab4b388ec2cdcb868fa751c62188
|
[
"Apache-2.0"
] | null | null | null |
Algorithms/sort/__init__.py
|
TigranGit/CodeBase
|
d58e30b1d83fab4b388ec2cdcb868fa751c62188
|
[
"Apache-2.0"
] | null | null | null |
from .bubble_sort import bubble_sort
from .insertion_sort import insertion_sort
from .merge_sort import merge_sort
from .quick_sort import quick_sort
| 30
| 42
| 0.866667
| 24
| 150
| 5.083333
| 0.291667
| 0.327869
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106667
| 150
| 4
| 43
| 37.5
| 0.910448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
14f7b3b59359835ed4146cae7e600e0551482433
| 246
|
py
|
Python
|
mezzanine_smartling/__init__.py
|
Appdynamics/mezzanine-smartling
|
76ca1dd929ff79e6c766eb46237edc6ab6d38a46
|
[
"Apache-2.0"
] | 3
|
2015-10-09T01:12:27.000Z
|
2015-10-10T09:41:56.000Z
|
mezzanine_smartling/__init__.py
|
Appdynamics/mezzanine-smartling
|
76ca1dd929ff79e6c766eb46237edc6ab6d38a46
|
[
"Apache-2.0"
] | null | null | null |
mezzanine_smartling/__init__.py
|
Appdynamics/mezzanine-smartling
|
76ca1dd929ff79e6c766eb46237edc6ab6d38a46
|
[
"Apache-2.0"
] | 2
|
2018-10-14T10:32:00.000Z
|
2019-08-22T06:07:28.000Z
|
"""
Developed by Craig J Williams
"""
from .managers import default_relational_manager
manager = default_relational_manager
register = default_relational_manager.register
get_registered_models = default_relational_manager.get_registered_models
| 24.6
| 72
| 0.861789
| 29
| 246
| 6.896552
| 0.517241
| 0.34
| 0.48
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089431
| 246
| 9
| 73
| 27.333333
| 0.892857
| 0.117886
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
09179282e15fa78ef00992cef87e772f47147cb8
| 54
|
py
|
Python
|
dlc2kinematics/utils/__init__.py
|
hausmanns/DLC2Kinematics
|
a0ca7b4ee3547752ed1b9f845ab8c537a8167a4a
|
[
"MIT"
] | 16
|
2020-02-01T18:34:44.000Z
|
2020-05-04T15:01:06.000Z
|
dlc2kinematics/utils/__init__.py
|
hausmanns/DLC2Kinematics
|
a0ca7b4ee3547752ed1b9f845ab8c537a8167a4a
|
[
"MIT"
] | null | null | null |
dlc2kinematics/utils/__init__.py
|
hausmanns/DLC2Kinematics
|
a0ca7b4ee3547752ed1b9f845ab8c537a8167a4a
|
[
"MIT"
] | null | null | null |
from dlc2kinematics.utils.auxiliaryfunctions import *
| 27
| 53
| 0.87037
| 5
| 54
| 9.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02
| 0.074074
| 54
| 1
| 54
| 54
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0941ac4b95378fa3382fef61d9f1e60f396ffc46
| 55
|
py
|
Python
|
www/tests/test_timeit.py
|
raspberrypieman/brython
|
2cc23d1da6acda604d4a56b4c9d464eb7e374eda
|
[
"BSD-3-Clause"
] | 5,926
|
2015-01-01T07:45:08.000Z
|
2022-03-31T12:34:38.000Z
|
www/tests/test_timeit.py
|
raspberrypieman/brython
|
2cc23d1da6acda604d4a56b4c9d464eb7e374eda
|
[
"BSD-3-Clause"
] | 1,728
|
2015-01-01T01:09:12.000Z
|
2022-03-30T23:25:22.000Z
|
www/tests/test_timeit.py
|
raspberrypieman/brython
|
2cc23d1da6acda604d4a56b4c9d464eb7e374eda
|
[
"BSD-3-Clause"
] | 574
|
2015-01-02T01:36:10.000Z
|
2022-03-26T10:18:48.000Z
|
import timeit
print(timeit.timeit("x=1", number=100))
| 13.75
| 39
| 0.727273
| 9
| 55
| 4.444444
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 0.090909
| 55
| 3
| 40
| 18.333333
| 0.72
| 0
| 0
| 0
| 0
| 0
| 0.054545
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
1182283b7f990ed0fb769997b7414d4f4561bd4a
| 182
|
py
|
Python
|
ports/gprs_a9/examples/example_43_ussd.py
|
ens4dz/micropython
|
1da32cb5744c97acac52b6dbabef8e77f34b70af
|
[
"MIT"
] | 79
|
2019-02-07T09:04:50.000Z
|
2022-02-20T06:54:44.000Z
|
ports/gprs_a9/examples/example_43_ussd.py
|
ens4dz/micropython
|
1da32cb5744c97acac52b6dbabef8e77f34b70af
|
[
"MIT"
] | 100
|
2019-05-16T09:25:23.000Z
|
2021-09-20T07:46:54.000Z
|
ports/gprs_a9/examples/example_43_ussd.py
|
ens4dz/micropython
|
1da32cb5744c97acac52b6dbabef8e77f34b70af
|
[
"MIT"
] | 25
|
2019-03-20T08:16:57.000Z
|
2022-03-11T17:59:36.000Z
|
# Micropython a9g example
# Source: https://github.com/pulkin/micropython
# Author: pulkin
# Demonstrates how to perform USSD request
import cellular
print(cellular.ussd("*149#"))
| 20.222222
| 47
| 0.763736
| 23
| 182
| 6.043478
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025
| 0.120879
| 182
| 8
| 48
| 22.75
| 0.84375
| 0.686813
| 0
| 0
| 0
| 0
| 0.098039
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
118614d6415a952b9e436c06c98f993064e7152f
| 88
|
py
|
Python
|
florin/graph/__init__.py
|
jeffkinnison/florin
|
94e76812e9fe27c86b2ce39313d07beb21c8b478
|
[
"MIT"
] | 6
|
2019-06-03T19:11:05.000Z
|
2021-01-13T06:35:43.000Z
|
florin/graph/__init__.py
|
jeffkinnison/florin
|
94e76812e9fe27c86b2ce39313d07beb21c8b478
|
[
"MIT"
] | 4
|
2019-06-10T14:48:15.000Z
|
2019-10-01T16:48:58.000Z
|
florin/graph/__init__.py
|
jeffkinnison/florin
|
94e76812e9fe27c86b2ce39313d07beb21c8b478
|
[
"MIT"
] | 1
|
2019-09-25T17:57:23.000Z
|
2019-09-25T17:57:23.000Z
|
from .florin_graph import FlorinOrderedMultiDiGraph
from .florin_node import FlorinNode
| 29.333333
| 51
| 0.886364
| 10
| 88
| 7.6
| 0.7
| 0.263158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 88
| 2
| 52
| 44
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
11a4bcc73f9fd707139af1ce954313ccf3edac81
| 132
|
py
|
Python
|
mt_to_hugo_article_converter/tests/test_config.py
|
mazgi/mt-to-hugo-article-converter
|
f124eeaa1a648e91f0bb3b6cf6ee5347b30ca45d
|
[
"MIT"
] | null | null | null |
mt_to_hugo_article_converter/tests/test_config.py
|
mazgi/mt-to-hugo-article-converter
|
f124eeaa1a648e91f0bb3b6cf6ee5347b30ca45d
|
[
"MIT"
] | 3
|
2020-02-14T14:34:58.000Z
|
2020-03-23T07:07:19.000Z
|
mt_to_hugo_article_converter/tests/test_config.py
|
mazgi/mt-to-hugo-article-converter
|
f124eeaa1a648e91f0bb3b6cf6ee5347b30ca45d
|
[
"MIT"
] | null | null | null |
import pytest
from ..config import Config
def test_config():
config = Config()
assert config.get_version() == '2019.10.0'
| 16.5
| 46
| 0.681818
| 18
| 132
| 4.888889
| 0.666667
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065421
| 0.189394
| 132
| 7
| 47
| 18.857143
| 0.757009
| 0
| 0
| 0
| 0
| 0
| 0.068182
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
11a709ce146317f81a6364184c6422a317a8bdce
| 76
|
py
|
Python
|
oatomobile/myscripts/myagents/coilAgent/coilutils.py
|
dHonerkamp/oatomobile
|
df16860e21989690f17146d84fc78632eb58bf76
|
[
"Apache-2.0"
] | null | null | null |
oatomobile/myscripts/myagents/coilAgent/coilutils.py
|
dHonerkamp/oatomobile
|
df16860e21989690f17146d84fc78632eb58bf76
|
[
"Apache-2.0"
] | null | null | null |
oatomobile/myscripts/myagents/coilAgent/coilutils.py
|
dHonerkamp/oatomobile
|
df16860e21989690f17146d84fc78632eb58bf76
|
[
"Apache-2.0"
] | null | null | null |
# def command_number_to_index(command_vector):
# return command_vector-2
| 38
| 46
| 0.802632
| 11
| 76
| 5.090909
| 0.727273
| 0.464286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014925
| 0.118421
| 76
| 2
| 47
| 38
| 0.820896
| 0.947368
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
11af5879cd98ecc8a6f85e2e7117a578da9cbfb7
| 169
|
py
|
Python
|
stimuli/Python/one_file_per_item/jap/41_# math_if 5.py
|
ALFA-group/neural_program_comprehension
|
0253911f376cf282af5a5627e38e0a591ad38860
|
[
"MIT"
] | 6
|
2020-04-24T08:16:51.000Z
|
2021-11-01T09:50:46.000Z
|
stimuli/Python/one_file_per_item/jap/41_# math_if 5.py
|
ALFA-group/neural_program_comprehension
|
0253911f376cf282af5a5627e38e0a591ad38860
|
[
"MIT"
] | null | null | null |
stimuli/Python/one_file_per_item/jap/41_# math_if 5.py
|
ALFA-group/neural_program_comprehension
|
0253911f376cf282af5a5627e38e0a591ad38860
|
[
"MIT"
] | 4
|
2021-02-17T20:21:31.000Z
|
2022-02-14T12:43:23.000Z
|
yosoku = 13
jissai = 11
gosa = 2
if (yosoku - jissai < gosa) or (yosoku - jissai > -1*gosa):
print(yosoku - jissai + gosa)
else:
print(yosoku - jissai - gosa)
| 16.9
| 59
| 0.615385
| 24
| 169
| 4.333333
| 0.458333
| 0.461538
| 0.461538
| 0.403846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047244
| 0.248521
| 169
| 9
| 60
| 18.777778
| 0.771654
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
11b1a40bc3709004cfaf358abd029058e2163944
| 24
|
py
|
Python
|
add.py
|
usha324/python
|
7aa967b8dac8cd0c466652db448cb7e405821389
|
[
"bzip2-1.0.6"
] | null | null | null |
add.py
|
usha324/python
|
7aa967b8dac8cd0c466652db448cb7e405821389
|
[
"bzip2-1.0.6"
] | null | null | null |
add.py
|
usha324/python
|
7aa967b8dac8cd0c466652db448cb7e405821389
|
[
"bzip2-1.0.6"
] | null | null | null |
a=10
b=30
print(a+b)
| 6
| 11
| 0.541667
| 7
| 24
| 1.857143
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 0.25
| 24
| 3
| 12
| 8
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
11e73e32842e2cf5e265a5eb2fcfb91b1df4c957
| 252
|
py
|
Python
|
cheat-sheets/notebook-config.py
|
nborwankar/python-fundamentals
|
65ffb7c98fa5834a5ea8bd1f635f91553f062e75
|
[
"Apache-2.0"
] | 4
|
2015-05-15T05:39:28.000Z
|
2017-07-05T10:47:21.000Z
|
cheat-sheets/notebook-config.py
|
zalzala/python-fundamentals
|
b818e5e7030b223ce86788760320c8c716e7a463
|
[
"Apache-2.0"
] | null | null | null |
cheat-sheets/notebook-config.py
|
zalzala/python-fundamentals
|
b818e5e7030b223ce86788760320c8c716e7a463
|
[
"Apache-2.0"
] | 4
|
2015-09-25T17:22:27.000Z
|
2018-10-10T18:07:30.000Z
|
c = get_config()
# Notebook config
c.NotebookApp.certfile = u'/home/collaboratool/collaboratool-cert.pem'
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.password = u'sha1:72eecedd5231:9d66151455494288b82761447d2757836cc514b1'
| 31.5
| 86
| 0.805556
| 29
| 252
| 6.931034
| 0.655172
| 0.238806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175966
| 0.075397
| 252
| 7
| 87
| 36
| 0.686695
| 0.059524
| 0
| 0
| 0
| 0
| 0.429787
| 0.425532
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.2
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
ee96860492c9aef91e8da066568c8a625ac36d39
| 35
|
py
|
Python
|
python/basics/current_utime.py
|
u1i/snippets
|
cc61b5ecaede1d1013df51c7b1b6ab10d927f95c
|
[
"MIT"
] | 1
|
2018-06-24T15:40:40.000Z
|
2018-06-24T15:40:40.000Z
|
python/basics/current_utime.py
|
u1i/snippets
|
cc61b5ecaede1d1013df51c7b1b6ab10d927f95c
|
[
"MIT"
] | null | null | null |
python/basics/current_utime.py
|
u1i/snippets
|
cc61b5ecaede1d1013df51c7b1b6ab10d927f95c
|
[
"MIT"
] | null | null | null |
import time
str(int(time.time()))
| 8.75
| 21
| 0.685714
| 6
| 35
| 4
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 3
| 22
| 11.666667
| 0.774194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
eeb07bdcc50104f56ff8eeec29b56b9ef74b5b9e
| 64
|
py
|
Python
|
code/features/attributeFeature.py
|
Yuran-Zhao/tf2-multimodal_sarcasm_detection
|
3b23e7fa12c1f04544984768c7d96aa12cd0e525
|
[
"MIT"
] | null | null | null |
code/features/attributeFeature.py
|
Yuran-Zhao/tf2-multimodal_sarcasm_detection
|
3b23e7fa12c1f04544984768c7d96aa12cd0e525
|
[
"MIT"
] | null | null | null |
code/features/attributeFeature.py
|
Yuran-Zhao/tf2-multimodal_sarcasm_detection
|
3b23e7fa12c1f04544984768c7d96aa12cd0e525
|
[
"MIT"
] | null | null | null |
# generate raw attribute vectors and attribute guidance vectors
| 32
| 63
| 0.84375
| 8
| 64
| 6.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140625
| 64
| 1
| 64
| 64
| 0.981818
| 0.953125
| 0
| null | 1
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0102c56517d6e683654d85553a9dcc477288fa7a
| 177
|
py
|
Python
|
pj-examples/colorflash/views.py
|
andrewschaaf/pyxc-pj
|
aa00298c9fcc62b4e3b7c5b8a8114c7545108cbc
|
[
"MIT"
] | 17
|
2015-10-26T22:51:30.000Z
|
2021-07-08T02:45:51.000Z
|
pj-examples/colorflash/views.py
|
andrewschaaf/pyxc-pj
|
aa00298c9fcc62b4e3b7c5b8a8114c7545108cbc
|
[
"MIT"
] | 1
|
2016-08-18T18:17:19.000Z
|
2018-05-09T04:04:05.000Z
|
pj-examples/colorflash/views.py
|
andrewschaaf/pyxc-pj
|
aa00298c9fcc62b4e3b7c5b8a8114c7545108cbc
|
[
"MIT"
] | 2
|
2015-05-15T23:45:49.000Z
|
2016-02-20T21:00:06.000Z
|
from django.shortcuts import render_to_response
def index(request):
from django.http import HttpResponseRedirect
return render_to_response('colorflash/index.html')
| 17.7
| 54
| 0.79661
| 22
| 177
| 6.227273
| 0.681818
| 0.145985
| 0.233577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141243
| 177
| 9
| 55
| 19.666667
| 0.901316
| 0
| 0
| 0
| 0
| 0
| 0.12069
| 0.12069
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
010abc50dac2d29cef2d9ecbbd13e6e84cd6cc5b
| 43
|
py
|
Python
|
nobrace/exceptions.py
|
iblis17/nobrace
|
7333029c6cd5f2a885614b5fe64f6c85ee5f296d
|
[
"MIT"
] | 2
|
2015-07-13T09:08:53.000Z
|
2017-05-22T07:56:29.000Z
|
nobrace/exceptions.py
|
iblis17/nobrace
|
7333029c6cd5f2a885614b5fe64f6c85ee5f296d
|
[
"MIT"
] | null | null | null |
nobrace/exceptions.py
|
iblis17/nobrace
|
7333029c6cd5f2a885614b5fe64f6c85ee5f296d
|
[
"MIT"
] | null | null | null |
class FileSuffixError(Exception):
pass
| 14.333333
| 33
| 0.767442
| 4
| 43
| 8.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 43
| 2
| 34
| 21.5
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
013cdbf7632d93d32b729c6d816aa2b4d783dda4
| 274
|
py
|
Python
|
app/OneTimeService.py
|
jrquiles18/Kennedy-Pools
|
628375c814c4b4a59fa194739ddab4ab5838d2f2
|
[
"MIT"
] | null | null | null |
app/OneTimeService.py
|
jrquiles18/Kennedy-Pools
|
628375c814c4b4a59fa194739ddab4ab5838d2f2
|
[
"MIT"
] | null | null | null |
app/OneTimeService.py
|
jrquiles18/Kennedy-Pools
|
628375c814c4b4a59fa194739ddab4ab5838d2f2
|
[
"MIT"
] | null | null | null |
"""OneTimeService Model."""
from config.database import Model
class OneTimeService(Model):
"""OneTimeService Model."""
__fillable__ = ['service', 'service_date', 'service_time', 'customer_name', 'address','email', 'cell_phone','service_state', 'cancelled_on']
| 30.444444
| 144
| 0.708029
| 28
| 274
| 6.571429
| 0.714286
| 0.309783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120438
| 274
| 9
| 145
| 30.444444
| 0.763485
| 0.156934
| 0
| 0
| 0
| 0
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
014282dff539de4517f0ad64f8a788e84f64fdd8
| 34
|
py
|
Python
|
examples/frozenset/ex1.py
|
mcorne/python-by-example
|
15339c0909c84b51075587a6a66391100971c033
|
[
"MIT"
] | null | null | null |
examples/frozenset/ex1.py
|
mcorne/python-by-example
|
15339c0909c84b51075587a6a66391100971c033
|
[
"MIT"
] | null | null | null |
examples/frozenset/ex1.py
|
mcorne/python-by-example
|
15339c0909c84b51075587a6a66391100971c033
|
[
"MIT"
] | null | null | null |
print(list(frozenset([1, 2, 3])))
| 17
| 33
| 0.617647
| 6
| 34
| 3.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 0.088235
| 34
| 1
| 34
| 34
| 0.580645
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
01481ed17d1c1057aeb77f7d692386cc47ca291a
| 78
|
py
|
Python
|
usazovator/bin/__init__.py
|
techlib/usazovator
|
628f10d080cf8ffc2b9b983f64bef17a9c7629b2
|
[
"MIT"
] | 1
|
2020-11-11T13:56:40.000Z
|
2020-11-11T13:56:40.000Z
|
usazovator/bin/__init__.py
|
techlib/usazovator
|
628f10d080cf8ffc2b9b983f64bef17a9c7629b2
|
[
"MIT"
] | 8
|
2016-12-20T11:45:46.000Z
|
2018-03-08T10:13:55.000Z
|
telescreen/decoder/__init__.py
|
techlib/telescreen
|
745b2db4e85dc36b0a268df55c65697d112ab818
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3 -tt
# -*- coding: utf-8 -*-
pass
# vim:set sw=4 ts=4 et:
| 11.142857
| 23
| 0.551282
| 15
| 78
| 2.866667
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063492
| 0.192308
| 78
| 6
| 24
| 13
| 0.619048
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
01627c9f5963d49c50be94c120bc60a3563a58bc
| 75
|
py
|
Python
|
question3.py
|
Noble-1206/Python-basic
|
bcdb6f089473c7912b3d8790f4d07b818f2bc800
|
[
"MIT"
] | 1
|
2020-03-16T15:13:43.000Z
|
2020-03-16T15:13:43.000Z
|
question3.py
|
Noble-1206/Python-basic
|
bcdb6f089473c7912b3d8790f4d07b818f2bc800
|
[
"MIT"
] | null | null | null |
question3.py
|
Noble-1206/Python-basic
|
bcdb6f089473c7912b3d8790f4d07b818f2bc800
|
[
"MIT"
] | null | null | null |
def what(n, m, s):
print(s+m%n-1)
what(946486979, 973168361, 647886035)
| 25
| 37
| 0.666667
| 14
| 75
| 3.571429
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4375
| 0.146667
| 75
| 3
| 37
| 25
| 0.34375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0167e6d07600071493377cbcaddcca9901476398
| 207
|
py
|
Python
|
satchmo/apps/shipping/fields.py
|
djangoplicity/satchmo
|
75b672dffb64fed3e55c253d51a0ce73f0747e05
|
[
"BSD-3-Clause"
] | null | null | null |
satchmo/apps/shipping/fields.py
|
djangoplicity/satchmo
|
75b672dffb64fed3e55c253d51a0ce73f0747e05
|
[
"BSD-3-Clause"
] | null | null | null |
satchmo/apps/shipping/fields.py
|
djangoplicity/satchmo
|
75b672dffb64fed3e55c253d51a0ce73f0747e05
|
[
"BSD-3-Clause"
] | null | null | null |
from livesettings.functions import config_choice_values, SettingNotSet
def shipping_choices():
try:
return config_choice_values('SHIPPING','MODULES')
except SettingNotSet:
return ()
| 25.875
| 70
| 0.7343
| 21
| 207
| 7
| 0.714286
| 0.163265
| 0.244898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188406
| 207
| 7
| 71
| 29.571429
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0.072464
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.166667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
0167fb0540f2296ea21028d804d077bb49be6de6
| 43
|
py
|
Python
|
run_55b4.py
|
mpi3d/goodix-fp-dump
|
039940845bd5eeb98cd92d72f267e3be77feb156
|
[
"MIT"
] | 136
|
2021-05-05T14:16:17.000Z
|
2022-03-31T09:04:18.000Z
|
run_55b4.py
|
tsunekotakimoto/goodix-fp-dump
|
b88ecbababd3766314521fe30ee943c4bd1810df
|
[
"MIT"
] | 14
|
2021-08-20T09:49:39.000Z
|
2022-03-20T13:18:05.000Z
|
run_55b4.py
|
tsunekotakimoto/goodix-fp-dump
|
b88ecbababd3766314521fe30ee943c4bd1810df
|
[
"MIT"
] | 11
|
2021-08-02T15:49:11.000Z
|
2022-02-06T22:06:42.000Z
|
from driver_55x4 import main
main(0x55b4)
| 10.75
| 28
| 0.813953
| 7
| 43
| 4.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189189
| 0.139535
| 43
| 3
| 29
| 14.333333
| 0.72973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6d7b74a6474d243efb8ec3f121c64f6301e25097
| 126
|
py
|
Python
|
app/webviews/__init__.py
|
ttbug/book_store
|
2c07a4c9e179fd943f67e178bafda9384a666dc0
|
[
"MIT"
] | null | null | null |
app/webviews/__init__.py
|
ttbug/book_store
|
2c07a4c9e179fd943f67e178bafda9384a666dc0
|
[
"MIT"
] | null | null | null |
app/webviews/__init__.py
|
ttbug/book_store
|
2c07a4c9e179fd943f67e178bafda9384a666dc0
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
# 或者也可以把这句放到单独的文件中
web = Blueprint('web', __name__)
# 导入相应的模块,注意位置
from app.webviews import web
| 15.75
| 32
| 0.777778
| 16
| 126
| 5.875
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 126
| 8
| 33
| 15.75
| 0.87037
| 0.230159
| 0
| 0
| 0
| 0
| 0.031579
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 5
|
6dcb5873475de608ae5144d1a1cf1c21906f2ded
| 143
|
py
|
Python
|
Bilibili-Notification/tests/language_test.py
|
cnscj/Bilibili-Notification
|
6d9be407c2eff2afc20bf53adca68bf71e89164a
|
[
"MIT"
] | null | null | null |
Bilibili-Notification/tests/language_test.py
|
cnscj/Bilibili-Notification
|
6d9be407c2eff2afc20bf53adca68bf71e89164a
|
[
"MIT"
] | null | null | null |
Bilibili-Notification/tests/language_test.py
|
cnscj/Bilibili-Notification
|
6d9be407c2eff2afc20bf53adca68bf71e89164a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#coding:utf-8
from configs import language_config
if __name__ == "__main__":
print(language_config.get_string(1000001))
| 20.428571
| 46
| 0.762238
| 20
| 143
| 4.9
| 0.9
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.062992
| 0.111888
| 143
| 6
| 47
| 23.833333
| 0.708661
| 0.195804
| 0
| 0
| 0
| 0
| 0.070796
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6de80a0c987b4eb78be560e79267a8aa27e1bf8d
| 118
|
py
|
Python
|
src/geventhttpclient/__init__.py
|
bullno1/geventhttpclient
|
f0a6b5731f9420674181474997164877c72ce298
|
[
"MIT"
] | 1
|
2020-07-30T12:41:23.000Z
|
2020-07-30T12:41:23.000Z
|
venv/lib/python3.7/site-packages/geventhttpclient/__init__.py
|
DiptoChakrabarty/load-tetsing
|
3e0937def0312b3c78a349ffae0dca283d98f902
|
[
"MIT"
] | null | null | null |
venv/lib/python3.7/site-packages/geventhttpclient/__init__.py
|
DiptoChakrabarty/load-tetsing
|
3e0937def0312b3c78a349ffae0dca283d98f902
|
[
"MIT"
] | null | null | null |
# package
__version__ = "1.4.4"
from geventhttpclient.client import HTTPClient
from geventhttpclient.url import URL
| 16.857143
| 46
| 0.805085
| 15
| 118
| 6.066667
| 0.666667
| 0.43956
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029126
| 0.127119
| 118
| 6
| 47
| 19.666667
| 0.854369
| 0.059322
| 0
| 0
| 0
| 0
| 0.045872
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
096ca3aae21c3ea3bc16e2926bf36b22ebf559c7
| 19
|
py
|
Python
|
wprowadzenie_3/module/dir1/__init__.py
|
pycircle/presentations
|
e2280ddb7c9d94c54242b2955c05fd1327667cfa
|
[
"Apache-2.0"
] | null | null | null |
wprowadzenie_3/module/dir1/__init__.py
|
pycircle/presentations
|
e2280ddb7c9d94c54242b2955c05fd1327667cfa
|
[
"Apache-2.0"
] | null | null | null |
wprowadzenie_3/module/dir1/__init__.py
|
pycircle/presentations
|
e2280ddb7c9d94c54242b2955c05fd1327667cfa
|
[
"Apache-2.0"
] | null | null | null |
print "dir1"
y = 2
| 6.333333
| 12
| 0.578947
| 4
| 19
| 2.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0.263158
| 19
| 2
| 13
| 9.5
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
09a15ea10935b12035a0f0c0c08d428fd92c0224
| 198
|
py
|
Python
|
tests/utils.py
|
cfhamlet/os-dbnetget
|
09643e5fc8d8f912199cd54e7be1eeb17154be4b
|
[
"MIT"
] | 10
|
2018-12-04T14:23:41.000Z
|
2020-11-27T07:03:05.000Z
|
tests/utils.py
|
cfhamlet/os-dbnetget
|
09643e5fc8d8f912199cd54e7be1eeb17154be4b
|
[
"MIT"
] | 1
|
2018-12-14T13:03:37.000Z
|
2018-12-14T13:03:37.000Z
|
tests/utils.py
|
cfhamlet/os-dbnetget
|
09643e5fc8d8f912199cd54e7be1eeb17154be4b
|
[
"MIT"
] | 1
|
2019-03-01T06:29:48.000Z
|
2019-03-01T06:29:48.000Z
|
import socket
from contextlib import closing
def unused_port():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
return s.getsockname()[1]
| 22
| 73
| 0.676768
| 28
| 198
| 4.678571
| 0.714286
| 0.183206
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012579
| 0.19697
| 198
| 8
| 74
| 24.75
| 0.811321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1128fb626845a11cc50bf2b592ec0d5c8975d4ef
| 77
|
py
|
Python
|
icevision/models/torchvision/mask_rcnn/lightning/__init__.py
|
bluseking/-first-agnostic-computer-vision-framework-to-offer-a-curated-collection-with-hundreds-of-high-qualit
|
2d91eacfab7fcaf09c93352f1e7816ccb2c252b9
|
[
"Apache-2.0"
] | 580
|
2020-09-10T06:29:57.000Z
|
2022-03-29T19:34:54.000Z
|
icevision/models/torchvision/mask_rcnn/lightning/__init__.py
|
bluseking/-first-agnostic-computer-vision-framework-to-offer-a-curated-collection-with-hundreds-of-high-qualit
|
2d91eacfab7fcaf09c93352f1e7816ccb2c252b9
|
[
"Apache-2.0"
] | 691
|
2020-09-05T03:08:34.000Z
|
2022-03-31T23:47:06.000Z
|
icevision/models/torchvision/mask_rcnn/lightning/__init__.py
|
bluseking/-first-agnostic-computer-vision-framework-to-offer-a-curated-collection-with-hundreds-of-high-qualit
|
2d91eacfab7fcaf09c93352f1e7816ccb2c252b9
|
[
"Apache-2.0"
] | 105
|
2020-09-09T10:41:35.000Z
|
2022-03-25T17:16:49.000Z
|
from icevision.models.torchvision.mask_rcnn.lightning.model_adapter import *
| 38.5
| 76
| 0.87013
| 10
| 77
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051948
| 77
| 1
| 77
| 77
| 0.890411
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1144c2536a11ab317c969a701f57d229dec35643
| 73
|
py
|
Python
|
dash-app/callbacks/symbol_chart_cb.py
|
traderpy/trade-simulator
|
b5fefb974c703a09a961debb3483bb9130f73ae0
|
[
"Apache-2.0"
] | null | null | null |
dash-app/callbacks/symbol_chart_cb.py
|
traderpy/trade-simulator
|
b5fefb974c703a09a961debb3483bb9130f73ae0
|
[
"Apache-2.0"
] | null | null | null |
dash-app/callbacks/symbol_chart_cb.py
|
traderpy/trade-simulator
|
b5fefb974c703a09a961debb3483bb9130f73ae0
|
[
"Apache-2.0"
] | null | null | null |
from app import app
from dash.dependencies import Input, Output, State
| 14.6
| 50
| 0.794521
| 11
| 73
| 5.272727
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164384
| 73
| 4
| 51
| 18.25
| 0.95082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1153e18af202defcab08f046949f0fa14834ca1a
| 102
|
py
|
Python
|
lab5/lab/zad7.py
|
BartlomiejRasztabiga/PIPR
|
2d0efd57b3b84855b5a2de335493100d2682d292
|
[
"MIT"
] | null | null | null |
lab5/lab/zad7.py
|
BartlomiejRasztabiga/PIPR
|
2d0efd57b3b84855b5a2de335493100d2682d292
|
[
"MIT"
] | null | null | null |
lab5/lab/zad7.py
|
BartlomiejRasztabiga/PIPR
|
2d0efd57b3b84855b5a2de335493100d2682d292
|
[
"MIT"
] | null | null | null |
from itertools import cycle
def cycle_iteration(lst):
cycled_list = []
return cycled_list
| 11.333333
| 27
| 0.715686
| 13
| 102
| 5.384615
| 0.769231
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.22549
| 102
| 8
| 28
| 12.75
| 0.886076
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
feea42c92a784f0a25c26978368fc65bc3364c90
| 251
|
py
|
Python
|
mezzanine_fluent_pages/mezzanine_layout_page/apps.py
|
sjdines/mezzanine-fluent-pages
|
43c804756acc9ba039846d2d9d6584fed3837f94
|
[
"BSD-2-Clause"
] | 1
|
2016-05-04T12:05:29.000Z
|
2016-05-04T12:05:29.000Z
|
mezzanine_fluent_pages/mezzanine_layout_page/apps.py
|
sjdines/mezzanine-fluent-pages
|
43c804756acc9ba039846d2d9d6584fed3837f94
|
[
"BSD-2-Clause"
] | null | null | null |
mezzanine_fluent_pages/mezzanine_layout_page/apps.py
|
sjdines/mezzanine-fluent-pages
|
43c804756acc9ba039846d2d9d6584fed3837f94
|
[
"BSD-2-Clause"
] | null | null | null |
from django.apps import AppConfig
class FluentMezzanineLayoutPageConfig(AppConfig):
"""
App configuration for `mezzanine_layout_page` app.
"""
label = 'mezzanine_layout_page'
name = 'mezzanine_fluent_pages.mezzanine_layout_page'
| 25.1
| 57
| 0.756972
| 26
| 251
| 7
| 0.653846
| 0.247253
| 0.313187
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163347
| 251
| 9
| 58
| 27.888889
| 0.866667
| 0.199203
| 0
| 0
| 0
| 0
| 0.351351
| 0.351351
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
feeedce891454972a2233cffbed3e0ee703cdd8a
| 63
|
py
|
Python
|
tests/test_workbook.py
|
ErikKBethke/tableau-workbook-xml
|
8c728b7a415c99cda5234c6dc70eb51ac65c5d0a
|
[
"MIT"
] | null | null | null |
tests/test_workbook.py
|
ErikKBethke/tableau-workbook-xml
|
8c728b7a415c99cda5234c6dc70eb51ac65c5d0a
|
[
"MIT"
] | null | null | null |
tests/test_workbook.py
|
ErikKBethke/tableau-workbook-xml
|
8c728b7a415c99cda5234c6dc70eb51ac65c5d0a
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from tableauxml import workbook
| 15.75
| 31
| 0.857143
| 8
| 63
| 6.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 63
| 3
| 32
| 21
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3a35ea7c1fe523e8dc32d509cd07e913a3f6c685
| 108
|
py
|
Python
|
shepherd/storage/__init__.py
|
iterait/shepherd
|
0847c9885584378dd68a48c40d03f9bb02b2b57c
|
[
"MIT"
] | 5
|
2018-10-13T19:03:07.000Z
|
2019-02-25T06:44:27.000Z
|
shepherd/storage/__init__.py
|
iterait/shepherd
|
0847c9885584378dd68a48c40d03f9bb02b2b57c
|
[
"MIT"
] | 62
|
2018-09-13T08:03:39.000Z
|
2022-01-03T09:05:54.000Z
|
shepherd/storage/__init__.py
|
iterait/shepherd
|
0847c9885584378dd68a48c40d03f9bb02b2b57c
|
[
"MIT"
] | null | null | null |
from .storage import Storage
from .minio_storage import MinioStorage
__all__ = ['Storage', 'MinioStorage']
| 21.6
| 39
| 0.787037
| 12
| 108
| 6.666667
| 0.5
| 0.325
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12037
| 108
| 4
| 40
| 27
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0.175926
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3a508c8b55a6976f9ff734873320ff12d1b6ac88
| 113
|
py
|
Python
|
mysite/ChainLicense/admin.py
|
Hwieun/ChainLicense
|
35d552ff1cfd056584a54b946999ff287e87d8ad
|
[
"Apache-2.0"
] | 2
|
2019-09-23T01:55:46.000Z
|
2019-11-08T16:33:47.000Z
|
mysite/ChainLicense/admin.py
|
Hwieun/ChainLicense
|
35d552ff1cfd056584a54b946999ff287e87d8ad
|
[
"Apache-2.0"
] | 1
|
2019-10-07T01:11:55.000Z
|
2019-10-07T01:11:55.000Z
|
mysite/ChainLicense/admin.py
|
Hwieun/ChainLicense
|
35d552ff1cfd056584a54b946999ff287e87d8ad
|
[
"Apache-2.0"
] | 1
|
2019-09-24T06:22:30.000Z
|
2019-09-24T06:22:30.000Z
|
from django.contrib import admin
# Register your models here.
from .models import Data
admin.site.register(Data)
| 22.6
| 32
| 0.80531
| 17
| 113
| 5.352941
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123894
| 113
| 5
| 33
| 22.6
| 0.919192
| 0.230089
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
28b0a3363da68d27ab8739664350f5ac88ea94b4
| 56,308
|
py
|
Python
|
tccli/services/youmall/youmall_client.py
|
zyh911/tencentcloud-cli
|
dfc5dbd660d4c60d265921c4edc630091478fc41
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/youmall/youmall_client.py
|
zyh911/tencentcloud-cli
|
dfc5dbd660d4c60d265921c4edc630091478fc41
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/youmall/youmall_client.py
|
zyh911/tencentcloud-cli
|
dfc5dbd660d4c60d265921c4edc630091478fc41
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli.nice_command import NiceCommand
import tccli.error_msg as ErrorMsg
import tccli.help_template as HelpTemplate
from tccli import __version__
from tccli.utils import Utils
from tccli.configure import Configure
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.youmall.v20180228 import youmall_client as youmall_client_v20180228
from tencentcloud.youmall.v20180228 import models as models_v20180228
from tccli.services.youmall import v20180228
from tccli.services.youmall.v20180228 import help as v20180228_help
def doDescribeCameraPerson(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeCameraPerson", g_param[OptionsDefine.Version])
return
param = {
"CompanyId": argv.get("--CompanyId"),
"ShopId": Utils.try_to_json(argv, "--ShopId"),
"CameraId": Utils.try_to_json(argv, "--CameraId"),
"StartTime": Utils.try_to_json(argv, "--StartTime"),
"EndTime": Utils.try_to_json(argv, "--EndTime"),
"PosId": argv.get("--PosId"),
"Num": Utils.try_to_json(argv, "--Num"),
"IsNeedPic": Utils.try_to_json(argv, "--IsNeedPic"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.YoumallClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeCameraPersonRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeCameraPerson(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribePersonInfo(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribePersonInfo", g_param[OptionsDefine.Version])
return
param = {
"CompanyId": argv.get("--CompanyId"),
"ShopId": Utils.try_to_json(argv, "--ShopId"),
"StartPersonId": Utils.try_to_json(argv, "--StartPersonId"),
"Offset": Utils.try_to_json(argv, "--Offset"),
"Limit": Utils.try_to_json(argv, "--Limit"),
"PictureExpires": Utils.try_to_json(argv, "--PictureExpires"),
"PersonType": Utils.try_to_json(argv, "--PersonType"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.YoumallClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribePersonInfoRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribePersonInfo(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeZoneTrafficInfo(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeZoneTrafficInfo", g_param[OptionsDefine.Version])
return
param = {
"CompanyId": argv.get("--CompanyId"),
"ShopId": Utils.try_to_json(argv, "--ShopId"),
"StartDate": argv.get("--StartDate"),
"EndDate": argv.get("--EndDate"),
"Offset": Utils.try_to_json(argv, "--Offset"),
"Limit": Utils.try_to_json(argv, "--Limit"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.YoumallClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeZoneTrafficInfoRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeZoneTrafficInfo(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeZoneFlowAgeInfoByZoneId(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeZoneFlowAgeInfoByZoneId", g_param[OptionsDefine.Version])
return
param = {
"CompanyId": argv.get("--CompanyId"),
"ShopId": Utils.try_to_json(argv, "--ShopId"),
"ZoneId": Utils.try_to_json(argv, "--ZoneId"),
"StartDate": argv.get("--StartDate"),
"EndDate": argv.get("--EndDate"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.YoumallClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeZoneFlowAgeInfoByZoneIdRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeZoneFlowAgeInfoByZoneId(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doRegisterCallback(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("RegisterCallback", g_param[OptionsDefine.Version])
return
param = {
"CompanyId": argv.get("--CompanyId"),
"BackUrl": argv.get("--BackUrl"),
"Time": Utils.try_to_json(argv, "--Time"),
"NeedFacePic": Utils.try_to_json(argv, "--NeedFacePic"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.YoumallClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.RegisterCallbackRequest()
model.from_json_string(json.dumps(param))
rsp = client.RegisterCallback(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeZoneFlowGenderAvrStayTimeByZoneId(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeZoneFlowGenderAvrStayTimeByZoneId", g_param[OptionsDefine.Version])
return
param = {
"CompanyId": argv.get("--CompanyId"),
"ShopId": Utils.try_to_json(argv, "--ShopId"),
"ZoneId": Utils.try_to_json(argv, "--ZoneId"),
"StartDate": argv.get("--StartDate"),
"EndDate": argv.get("--EndDate"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.YoumallClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeZoneFlowGenderAvrStayTimeByZoneIdRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeZoneFlowGenderAvrStayTimeByZoneId(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeZoneFlowAndStayTime(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeZoneFlowAndStayTime", g_param[OptionsDefine.Version])
return
param = {
"CompanyId": argv.get("--CompanyId"),
"ShopId": Utils.try_to_json(argv, "--ShopId"),
"StartDate": argv.get("--StartDate"),
"EndDate": argv.get("--EndDate"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.YoumallClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeZoneFlowAndStayTimeRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeZoneFlowAndStayTime(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribePersonVisitInfo(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribePersonVisitInfo", g_param[OptionsDefine.Version])
return
param = {
"CompanyId": argv.get("--CompanyId"),
"ShopId": Utils.try_to_json(argv, "--ShopId"),
"Offset": Utils.try_to_json(argv, "--Offset"),
"Limit": Utils.try_to_json(argv, "--Limit"),
"StartDate": argv.get("--StartDate"),
"EndDate": argv.get("--EndDate"),
"PictureExpires": Utils.try_to_json(argv, "--PictureExpires"),
"StartDateTime": argv.get("--StartDateTime"),
"EndDateTime": argv.get("--EndDateTime"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.YoumallClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribePersonVisitInfoRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribePersonVisitInfo(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeZoneFlowHourlyByZoneId(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeZoneFlowHourlyByZoneId", g_param[OptionsDefine.Version])
return
param = {
"CompanyId": argv.get("--CompanyId"),
"ShopId": Utils.try_to_json(argv, "--ShopId"),
"ZoneId": Utils.try_to_json(argv, "--ZoneId"),
"StartDate": argv.get("--StartDate"),
"EndDate": argv.get("--EndDate"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.YoumallClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeZoneFlowHourlyByZoneIdRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeZoneFlowHourlyByZoneId(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeShopInfo(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeShopInfo", g_param[OptionsDefine.Version])
return
param = {
"Offset": Utils.try_to_json(argv, "--Offset"),
"Limit": Utils.try_to_json(argv, "--Limit"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.YoumallClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeShopInfoRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeShopInfo(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeFaceIdByTempId(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeFaceIdByTempId", g_param[OptionsDefine.Version])
return
param = {
"CompanyId": argv.get("--CompanyId"),
"ShopId": Utils.try_to_json(argv, "--ShopId"),
"TempId": argv.get("--TempId"),
"CameraId": Utils.try_to_json(argv, "--CameraId"),
"PosId": argv.get("--PosId"),
"PictureExpires": Utils.try_to_json(argv, "--PictureExpires"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.YoumallClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeFaceIdByTempIdRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeFaceIdByTempId(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeZoneFlowGenderInfoByZoneId(argv, arglist):
    """CLI handler for DescribeZoneFlowGenderInfoByZoneId: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeZoneFlowGenderInfoByZoneId", opts[OptionsDefine.Version])
        return
    payload = {
        "CompanyId": argv.get("--CompanyId"),
        "ShopId": Utils.try_to_json(argv, "--ShopId"),
        "ZoneId": Utils.try_to_json(argv, "--ZoneId"),
        "StartDate": argv.get("--StartDate"),
        "EndDate": argv.get("--EndDate"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].DescribeZoneFlowGenderInfoByZoneIdRequest()
    request.from_json_string(json.dumps(payload))
    output = client.DescribeZoneFlowGenderInfoByZoneId(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doDescribeShopTrafficInfo(argv, arglist):
    """CLI handler for DescribeShopTrafficInfo: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeShopTrafficInfo", opts[OptionsDefine.Version])
        return
    payload = {
        "CompanyId": argv.get("--CompanyId"),
        "ShopId": Utils.try_to_json(argv, "--ShopId"),
        "StartDate": argv.get("--StartDate"),
        "EndDate": argv.get("--EndDate"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].DescribeShopTrafficInfoRequest()
    request.from_json_string(json.dumps(payload))
    output = client.DescribeShopTrafficInfo(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doDescribePersonInfoByFacePicture(argv, arglist):
    """CLI handler for DescribePersonInfoByFacePicture: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribePersonInfoByFacePicture", opts[OptionsDefine.Version])
        return
    payload = {
        "CompanyId": argv.get("--CompanyId"),
        "ShopId": Utils.try_to_json(argv, "--ShopId"),
        "Picture": argv.get("--Picture"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].DescribePersonInfoByFacePictureRequest()
    request.from_json_string(json.dumps(payload))
    output = client.DescribePersonInfoByFacePicture(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doCreateFacePicture(argv, arglist):
    """CLI handler for CreateFacePicture: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("CreateFacePicture", opts[OptionsDefine.Version])
        return
    payload = {
        "CompanyId": argv.get("--CompanyId"),
        "PersonType": Utils.try_to_json(argv, "--PersonType"),
        "Picture": argv.get("--Picture"),
        "PictureName": argv.get("--PictureName"),
        "ShopId": Utils.try_to_json(argv, "--ShopId"),
        "IsForceUpload": Utils.try_to_json(argv, "--IsForceUpload"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].CreateFacePictureRequest()
    request.from_json_string(json.dumps(payload))
    output = client.CreateFacePicture(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doCreateAccount(argv, arglist):
    """CLI handler for CreateAccount: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("CreateAccount", opts[OptionsDefine.Version])
        return
    payload = {
        "CompanyId": argv.get("--CompanyId"),
        "Name": argv.get("--Name"),
        "Password": argv.get("--Password"),
        "ShopCode": argv.get("--ShopCode"),
        "Remark": argv.get("--Remark"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].CreateAccountRequest()
    request.from_json_string(json.dumps(payload))
    output = client.CreateAccount(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doDescribeShopHourTrafficInfo(argv, arglist):
    """CLI handler for DescribeShopHourTrafficInfo: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeShopHourTrafficInfo", opts[OptionsDefine.Version])
        return
    payload = {
        "CompanyId": argv.get("--CompanyId"),
        "ShopId": Utils.try_to_json(argv, "--ShopId"),
        "StartDate": argv.get("--StartDate"),
        "EndDate": argv.get("--EndDate"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].DescribeShopHourTrafficInfoRequest()
    request.from_json_string(json.dumps(payload))
    output = client.DescribeShopHourTrafficInfo(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doDescribePersonTrace(argv, arglist):
    """CLI handler for DescribePersonTrace: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribePersonTrace", opts[OptionsDefine.Version])
        return
    payload = {
        "MallId": argv.get("--MallId"),
        "PersonId": argv.get("--PersonId"),
        "StartTime": argv.get("--StartTime"),
        "EndTime": argv.get("--EndTime"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].DescribePersonTraceRequest()
    request.from_json_string(json.dumps(payload))
    output = client.DescribePersonTrace(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doDescribeZoneFlowDailyByZoneId(argv, arglist):
    """CLI handler for DescribeZoneFlowDailyByZoneId: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeZoneFlowDailyByZoneId", opts[OptionsDefine.Version])
        return
    payload = {
        "CompanyId": argv.get("--CompanyId"),
        "ShopId": Utils.try_to_json(argv, "--ShopId"),
        "ZoneId": Utils.try_to_json(argv, "--ZoneId"),
        "StartDate": argv.get("--StartDate"),
        "EndDate": argv.get("--EndDate"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].DescribeZoneFlowDailyByZoneIdRequest()
    request.from_json_string(json.dumps(payload))
    output = client.DescribeZoneFlowDailyByZoneId(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doDescribePersonArrivedMall(argv, arglist):
    """CLI handler for DescribePersonArrivedMall: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribePersonArrivedMall", opts[OptionsDefine.Version])
        return
    payload = {
        "MallId": argv.get("--MallId"),
        "PersonId": argv.get("--PersonId"),
        "StartTime": argv.get("--StartTime"),
        "EndTime": argv.get("--EndTime"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].DescribePersonArrivedMallRequest()
    request.from_json_string(json.dumps(payload))
    output = client.DescribePersonArrivedMall(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doDescribeClusterPersonArrivedMall(argv, arglist):
    """CLI handler for DescribeClusterPersonArrivedMall: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeClusterPersonArrivedMall", opts[OptionsDefine.Version])
        return
    payload = {
        "MallId": argv.get("--MallId"),
        "PersonId": argv.get("--PersonId"),
        "StartTime": argv.get("--StartTime"),
        "EndTime": argv.get("--EndTime"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].DescribeClusterPersonArrivedMallRequest()
    request.from_json_string(json.dumps(payload))
    output = client.DescribeClusterPersonArrivedMall(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doDescribePersonTraceDetail(argv, arglist):
    """CLI handler for DescribePersonTraceDetail: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribePersonTraceDetail", opts[OptionsDefine.Version])
        return
    payload = {
        "MallId": argv.get("--MallId"),
        "PersonId": argv.get("--PersonId"),
        "TraceId": argv.get("--TraceId"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].DescribePersonTraceDetailRequest()
    request.from_json_string(json.dumps(payload))
    output = client.DescribePersonTraceDetail(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doModifyPersonType(argv, arglist):
    """CLI handler for ModifyPersonType: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("ModifyPersonType", opts[OptionsDefine.Version])
        return
    payload = {
        "CompanyId": argv.get("--CompanyId"),
        "ShopId": Utils.try_to_json(argv, "--ShopId"),
        "PersonId": Utils.try_to_json(argv, "--PersonId"),
        "PersonType": Utils.try_to_json(argv, "--PersonType"),
        "PersonSubType": Utils.try_to_json(argv, "--PersonSubType"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].ModifyPersonTypeRequest()
    request.from_json_string(json.dumps(payload))
    output = client.ModifyPersonType(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doModifyPersonFeatureInfo(argv, arglist):
    """CLI handler for ModifyPersonFeatureInfo: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("ModifyPersonFeatureInfo", opts[OptionsDefine.Version])
        return
    payload = {
        "CompanyId": argv.get("--CompanyId"),
        "PersonId": Utils.try_to_json(argv, "--PersonId"),
        "Picture": argv.get("--Picture"),
        "PictureName": argv.get("--PictureName"),
        "PersonType": Utils.try_to_json(argv, "--PersonType"),
        "ShopId": Utils.try_to_json(argv, "--ShopId"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].ModifyPersonFeatureInfoRequest()
    request.from_json_string(json.dumps(payload))
    output = client.ModifyPersonFeatureInfo(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doDescribeHistoryNetworkInfo(argv, arglist):
    """CLI handler for DescribeHistoryNetworkInfo: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeHistoryNetworkInfo", opts[OptionsDefine.Version])
        return
    payload = {
        "Time": Utils.try_to_json(argv, "--Time"),
        "CompanyId": argv.get("--CompanyId"),
        "ShopId": Utils.try_to_json(argv, "--ShopId"),
        "StartDay": argv.get("--StartDay"),
        "EndDay": argv.get("--EndDay"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].DescribeHistoryNetworkInfoRequest()
    request.from_json_string(json.dumps(payload))
    output = client.DescribeHistoryNetworkInfo(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doDescribeNetworkInfo(argv, arglist):
    """CLI handler for DescribeNetworkInfo: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeNetworkInfo", opts[OptionsDefine.Version])
        return
    payload = {
        "Time": Utils.try_to_json(argv, "--Time"),
        "CompanyId": argv.get("--CompanyId"),
        "ShopId": Utils.try_to_json(argv, "--ShopId"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].DescribeNetworkInfoRequest()
    request.from_json_string(json.dumps(payload))
    output = client.DescribeNetworkInfo(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doDeletePersonFeature(argv, arglist):
    """CLI handler for DeletePersonFeature: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("DeletePersonFeature", opts[OptionsDefine.Version])
        return
    payload = {
        "CompanyId": argv.get("--CompanyId"),
        "ShopId": Utils.try_to_json(argv, "--ShopId"),
        "PersonId": Utils.try_to_json(argv, "--PersonId"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].DeletePersonFeatureRequest()
    request.from_json_string(json.dumps(payload))
    output = client.DeletePersonFeature(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doModifyPersonTagInfo(argv, arglist):
    """CLI handler for ModifyPersonTagInfo: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("ModifyPersonTagInfo", opts[OptionsDefine.Version])
        return
    payload = {
        "CompanyId": argv.get("--CompanyId"),
        "ShopId": Utils.try_to_json(argv, "--ShopId"),
        "Tags": Utils.try_to_json(argv, "--Tags"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].ModifyPersonTagInfoRequest()
    request.from_json_string(json.dumps(payload))
    output = client.ModifyPersonTagInfo(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doDescribePerson(argv, arglist):
    """CLI handler for DescribePerson: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribePerson", opts[OptionsDefine.Version])
        return
    payload = {
        "MallId": argv.get("--MallId"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].DescribePersonRequest()
    request.from_json_string(json.dumps(payload))
    output = client.DescribePerson(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doDescribeClusterPersonTrace(argv, arglist):
    """CLI handler for DescribeClusterPersonTrace: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeClusterPersonTrace", opts[OptionsDefine.Version])
        return
    payload = {
        "MallId": argv.get("--MallId"),
        "PersonId": argv.get("--PersonId"),
        "StartTime": argv.get("--StartTime"),
        "EndTime": argv.get("--EndTime"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].DescribeClusterPersonTraceRequest()
    request.from_json_string(json.dumps(payload))
    output = client.DescribeClusterPersonTrace(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
def doDescribeTrajectoryData(argv, arglist):
    """CLI handler for DescribeTrajectoryData: build the request from argv and print the JSON reply."""
    opts = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeTrajectoryData", opts[OptionsDefine.Version])
        return
    payload = {
        "CompanyId": argv.get("--CompanyId"),
        "ShopId": Utils.try_to_json(argv, "--ShopId"),
        "StartDate": argv.get("--StartDate"),
        "EndDate": argv.get("--EndDate"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
        "Gender": Utils.try_to_json(argv, "--Gender"),
    }
    cred = credential.Credential(opts[OptionsDefine.SecretId], opts[OptionsDefine.SecretKey])
    timeout = opts[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=opts[OptionsDefine.Endpoint]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = opts[OptionsDefine.Version]
    client = CLIENT_MAP[version].YoumallClient(cred, opts[OptionsDefine.Region], client_profile)
    client._sdkVersion += "_CLI_" + __version__
    request = MODELS_MAP[version].DescribeTrajectoryDataRequest()
    request.from_json_string(json.dumps(payload))
    output = client.DescribeTrajectoryData(request).to_json_string()
    try:
        parsed = json.loads(output)
    except TypeError:
        # some python3 runtimes hand back bytes; decode before parsing
        parsed = json.loads(output.decode('utf-8'))
    FormatOutput.output("action", parsed, opts[OptionsDefine.Output], opts[OptionsDefine.Filter])
# Maps the internal version tag ("vYYYYMMDD") to the generated SDK client module.
CLIENT_MAP = {
    "v20180228": youmall_client_v20180228,
}
# Maps the internal version tag to the generated SDK request/response models module.
MODELS_MAP = {
    "v20180228": models_v20180228,
}
# Maps each API action name to its CLI handler function defined above.
ACTION_MAP = {
    "DescribeCameraPerson": doDescribeCameraPerson,
    "DescribePersonInfo": doDescribePersonInfo,
    "DescribeZoneTrafficInfo": doDescribeZoneTrafficInfo,
    "DescribeZoneFlowAgeInfoByZoneId": doDescribeZoneFlowAgeInfoByZoneId,
    "RegisterCallback": doRegisterCallback,
    "DescribeZoneFlowGenderAvrStayTimeByZoneId": doDescribeZoneFlowGenderAvrStayTimeByZoneId,
    "DescribeZoneFlowAndStayTime": doDescribeZoneFlowAndStayTime,
    "DescribePersonVisitInfo": doDescribePersonVisitInfo,
    "DescribeZoneFlowHourlyByZoneId": doDescribeZoneFlowHourlyByZoneId,
    "DescribeShopInfo": doDescribeShopInfo,
    "DescribeFaceIdByTempId": doDescribeFaceIdByTempId,
    "DescribeZoneFlowGenderInfoByZoneId": doDescribeZoneFlowGenderInfoByZoneId,
    "DescribeShopTrafficInfo": doDescribeShopTrafficInfo,
    "DescribePersonInfoByFacePicture": doDescribePersonInfoByFacePicture,
    "CreateFacePicture": doCreateFacePicture,
    "CreateAccount": doCreateAccount,
    "DescribeShopHourTrafficInfo": doDescribeShopHourTrafficInfo,
    "DescribePersonTrace": doDescribePersonTrace,
    "DescribeZoneFlowDailyByZoneId": doDescribeZoneFlowDailyByZoneId,
    "DescribePersonArrivedMall": doDescribePersonArrivedMall,
    "DescribeClusterPersonArrivedMall": doDescribeClusterPersonArrivedMall,
    "DescribePersonTraceDetail": doDescribePersonTraceDetail,
    "ModifyPersonType": doModifyPersonType,
    "ModifyPersonFeatureInfo": doModifyPersonFeatureInfo,
    "DescribeHistoryNetworkInfo": doDescribeHistoryNetworkInfo,
    "DescribeNetworkInfo": doDescribeNetworkInfo,
    "DeletePersonFeature": doDeletePersonFeature,
    "ModifyPersonTagInfo": doModifyPersonTagInfo,
    "DescribePerson": doDescribePerson,
    "DescribeClusterPersonTrace": doDescribeClusterPersonTrace,
    "DescribeTrajectoryData": doDescribeTrajectoryData,
}
# Raw version strings (e.g. "2018-02-28") supported by this service.
AVAILABLE_VERSION_LIST = [
    v20180228.version,
]
# Per-version help/description metadata keyed by the internal "vYYYYMMDD" tag.
AVAILABLE_VERSIONS = {
    'v' + v20180228.version.replace('-', ''): {"help": v20180228_help.INFO,"desc": v20180228_help.DESC},
}
def youmall_action(argv, arglist):
    """Service-level entry: with "help", print the action catalogue for the
    requested (or newest) API version; otherwise report missing arguments."""
    if "help" not in argv:
        print(ErrorMsg.FEW_ARG)
        return
    known = sorted(AVAILABLE_VERSIONS.keys())
    version_opt = "--" + OptionsDefine.Version
    if version_opt in argv:
        version = 'v' + argv[version_opt].replace('-', '')
    else:
        version = known[-1]
    if version not in known:
        print("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
        return
    entry = AVAILABLE_VERSIONS[version]
    pieces = []
    for action, info in entry["help"].items():
        pieces.append(" %s\n" % action)
        pieces.append(Utils.split_str(" ", info["desc"], 120))
    print(HelpTemplate.SERVICE % {"name": "youmall", "desc": entry["desc"], "actions": "".join(pieces)})
def version_merge():
    """Flatten per-version help metadata into one action -> {"cb", "params"} table.

    For each version (later versions win), the callback and parameter list are
    rebuilt, and parameter names are de-duplicated within that rebuild.
    """
    merged = {}
    for ver in AVAILABLE_VERSIONS:
        for action, info in AVAILABLE_VERSIONS[ver]["help"].items():
            record = merged.get(action)
            if record is None:
                record = merged[action] = {}
            record["cb"] = ACTION_MAP[action]
            record["params"] = []
            for param in info["params"]:
                if param["name"] not in record["params"]:
                    record["params"].append(param["name"])
    return merged
def register_arg(command):
    """Register the "youmall" command and one sub-command per merged action,
    wiring up help, version, per-action, and global options."""
    root = NiceCommand("youmall", youmall_action)
    command.reg_cmd(root)
    root.reg_opt("help", "bool")
    root.reg_opt(OptionsDefine.Version, "string")
    for action_name, meta in version_merge().items():
        sub = NiceCommand(action_name, meta["cb"])
        root.reg_cmd(sub)
        sub.reg_opt("help", "bool")
        for param in meta["params"]:
            sub.reg_opt("--" + param, "string")
        for opt in OptionsDefine.ACTION_GLOBAL_OPT:
            sub.reg_opt("--" + opt, "string")
def parse_global_arg(argv):
    """Resolve the global options for an action invocation.

    Precedence, highest first: explicit command-line flags, then environment
    variables (secret id/key, region only), then the selected profile's
    configure/credential files. Returns the fully-resolved params dict.

    Raises Exception when a required option (secret id/key, region, version,
    endpoint) cannot be resolved from any of those sources.
    """
    params = {}
    # Seed every global option from the command line; absent ones become None.
    for opt in OptionsDefine.ACTION_GLOBAL_OPT:
        stropt = "--" + opt
        if stropt in argv:
            params[opt] = argv[stropt]
        else:
            params[opt] = None
    # Normalize a "2018-02-28" style version into the internal "v20180228" tag.
    if params[OptionsDefine.Version]:
        params[OptionsDefine.Version] = "v" + params[OptionsDefine.Version].replace('-', '')
    config_handle = Configure()
    profile = config_handle.profile
    # --profile flag overrides the default profile name.
    if ("--" + OptionsDefine.Profile) in argv:
        profile = argv[("--" + OptionsDefine.Profile)]
    is_conexist, conf_path = config_handle._profile_existed(profile + "." + config_handle.configure)
    is_creexist, cred_path = config_handle._profile_existed(profile + "." + config_handle.credential)
    config = {}
    cred = {}
    if is_conexist:
        config = config_handle._load_json_msg(conf_path)
    if is_creexist:
        cred = config_handle._load_json_msg(cred_path)
    # Environment variables override values loaded from the profile files.
    if os.environ.get(OptionsDefine.ENV_SECRET_ID):
        cred[OptionsDefine.SecretId] = os.environ.get(OptionsDefine.ENV_SECRET_ID)
    if os.environ.get(OptionsDefine.ENV_SECRET_KEY):
        cred[OptionsDefine.SecretKey] = os.environ.get(OptionsDefine.ENV_SECRET_KEY)
    if os.environ.get(OptionsDefine.ENV_REGION):
        config[OptionsDefine.Region] = os.environ.get(OptionsDefine.ENV_REGION)
    # Fill remaining gaps from cred (secrets) or config (everything else);
    # secrets and region are mandatory, the rest may legitimately stay None.
    for param in params.keys():
        if param == OptionsDefine.Version:
            continue
        if params[param] is None:
            if param in [OptionsDefine.SecretKey, OptionsDefine.SecretId]:
                if param in cred:
                    params[param] = cred[param]
                else:
                    raise Exception("%s is invalid" % param)
            else:
                if param in config:
                    params[param] = config[param]
                elif param == OptionsDefine.Region:
                    raise Exception("%s is invalid" % OptionsDefine.Region)
    # Version and endpoint fall back to the service-specific config section.
    try:
        if params[OptionsDefine.Version] is None:
            version = config["youmall"][OptionsDefine.Version]
            params[OptionsDefine.Version] = "v" + version.replace('-', '')
        if params[OptionsDefine.Endpoint] is None:
            params[OptionsDefine.Endpoint] = config["youmall"][OptionsDefine.Endpoint]
    except Exception as err:
        raise Exception("config file:%s error, %s" % (conf_path, str(err)))
    versions = sorted(AVAILABLE_VERSIONS.keys())
    if params[OptionsDefine.Version] not in versions:
        raise Exception("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
    return params
def show_help(action, version):
    """Print the formatted CLI help text for one API action.

    Args:
        action: name of the API action to describe.
        version: API version key into AVAILABLE_VERSIONS (e.g. "v20180228").
    """
    # Help metadata and service description for the requested version.
    docs = AVAILABLE_VERSIONS[version]["help"][action]
    desc = AVAILABLE_VERSIONS[version]["desc"]
    docstr = ""
    for param in docs["params"]:
        # One "--name" line per parameter, followed by its wrapped description.
        docstr += "        %s\n" % ("--" + param["name"])
        docstr += Utils.split_str("        ", param["desc"], 120)
    # Render through the shared action help template.
    helpmsg = HelpTemplate.ACTION % {"name": action, "service": "youmall", "desc": desc, "params": docstr}
    print(helpmsg)
def get_actions_info():
    """Return the help metadata dict for the user's configured API version.

    Reads the default CLI profile to find the preferred "youmall" version and
    falls back to the newest available version when the profile is missing,
    unreadable, or names an unknown version.
    """
    config = Configure()
    newest = max(AVAILABLE_VERSIONS.keys())
    chosen = newest
    try:
        profile = config._load_json_msg(os.path.join(config.cli_path, "default.configure"))
        chosen = "v" + profile["youmall"]["version"].replace('-', '')
    except Exception:
        # Best effort: any problem reading the profile keeps the newest version.
        pass
    if chosen not in AVAILABLE_VERSIONS:
        chosen = newest
    return AVAILABLE_VERSIONS[chosen]["help"]
| 41.402941
| 106
| 0.692584
| 6,096
| 56,308
| 6.200951
| 0.046424
| 0.059046
| 0.171398
| 0.063967
| 0.784477
| 0.766964
| 0.759636
| 0.741541
| 0.732547
| 0.695722
| 0
| 0.00817
| 0.182656
| 56,308
| 1,359
| 107
| 41.433407
| 0.81318
| 0.005878
| 0
| 0.689627
| 0
| 0
| 0.098634
| 0.01859
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030705
| false
| 0.00166
| 0.014108
| 0
| 0.073859
| 0.00332
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
28c57a665d6419ff20c82f8548157eec65e24e3b
| 30
|
py
|
Python
|
steinerpy/library/logger/__init__.py
|
rooshm/steinerpy
|
777b55fa94527365322ba5fa675c8be090333715
|
[
"MIT"
] | 3
|
2021-06-10T16:46:20.000Z
|
2022-02-11T14:24:15.000Z
|
steinerpy/library/logger/__init__.py
|
rooshm/steinerpy
|
777b55fa94527365322ba5fa675c8be090333715
|
[
"MIT"
] | 12
|
2021-03-31T03:31:24.000Z
|
2021-11-18T21:51:18.000Z
|
steinerpy/library/logger/__init__.py
|
rooshm/steinerpy
|
777b55fa94527365322ba5fa675c8be090333715
|
[
"MIT"
] | 1
|
2021-06-13T15:01:24.000Z
|
2021-06-13T15:01:24.000Z
|
from .mylogger import MyLogger
| 30
| 30
| 0.866667
| 4
| 30
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 30
| 1
| 30
| 30
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e9387dcf4fd910eeb62f9a6875e3d2d3f51e8bb1
| 45
|
py
|
Python
|
tests/__init__.py
|
latkins/torch_keypoints
|
7e0ad2618cef18a80eb6cf96acf624b6f67e3d51
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
latkins/torch_keypoints
|
7e0ad2618cef18a80eb6cf96acf624b6f67e3d51
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
latkins/torch_keypoints
|
7e0ad2618cef18a80eb6cf96acf624b6f67e3d51
|
[
"MIT"
] | null | null | null |
"""Unit test package for torch_keypoints."""
| 22.5
| 44
| 0.733333
| 6
| 45
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 45
| 1
| 45
| 45
| 0.8
| 0.844444
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e9420aab13a61de06e01f35c46ecb636c7338660
| 2,127
|
py
|
Python
|
iptf_version/old_readsort.py
|
emirkmo/GeminiReductionsZTF
|
ba16ba768ef004641210c61166fcb0e3e6905845
|
[
"MIT"
] | null | null | null |
iptf_version/old_readsort.py
|
emirkmo/GeminiReductionsZTF
|
ba16ba768ef004641210c61166fcb0e3e6905845
|
[
"MIT"
] | null | null | null |
iptf_version/old_readsort.py
|
emirkmo/GeminiReductionsZTF
|
ba16ba768ef004641210c61166fcb0e3e6905845
|
[
"MIT"
] | null | null | null |
# Sort FITS exposures by grating: append each matching frame to a per-grating
# .lst listing and write one-filename-per-file .txt lists for flats, science
# objects and arc ('Ar') frames.
# NOTE: Python 2 script ("print>>" syntax); must run under python2.
import astropy.io.fits as fits
import glob
import subprocess
#cmd='!gethead N*.fits -x 0 UT OBJECT GRATING EXPTIME | grep R400 > R400+_G5305.lst'
#subprocess.Popen(cmd).wait()
#cmd='!gethead N*.fits -x 0 UT OBJECT GRATING EXPTIME | grep B600 > B600+_G5307.lst'
#subprocess.Popen(cmd).wait()
files= glob.glob('N*.fits')
#for i in xrange(len(files)):
#    files[i]
# Listing files, one per grating.
f=open('R400+_G5305.lst','w')
s=open('B600+_G5307.lst','w')
# Counters used to number the generated .txt files:
# i = flats, j = per-object science lists, k = arcs, w = generic object lists.
w=1
i=1
j=1
k=1
# First pass: frames taken with the R400 grating.
for name in files:
    hdulist= fits.open(name)
    # NOTE(review): close() is called before the header reads below; the header
    # appears to remain accessible after close() — confirm with the astropy
    # version in use.
    hdulist.close()
    if hdulist[0].header['grating']=='R400+_G5305':
        print>>f, name, hdulist[0].header['object']
        if 'flat' in hdulist[0].header['object']:
            x=open('flatr400_'+str(i)+'.txt','w')
            print>>x, name
            x.close()
            i+=1
        elif hdulist[0].header['OBSTYPE']=='OBJECT':
            # Science frame: one file named after the object, one generic list.
            x=open(str(hdulist[0].header['object'])+'_r400_'+str(j)+'.txt','w')
            b=open('object_r400_'+str(w)+'.txt','w')
            print>>x,name
            print>>b,name
            b.close()
            j+=1
            w+=1
            x.close()
        elif 'Ar' in hdulist[0].header['object']:
            # Arc-lamp calibration frame.
            x=open(str(hdulist[0].header['object'])+'_r400_'+str(k)+'.txt','w')
            print>>x,name
            k+=1
            x.close()
# Reset counters and repeat for the B600 grating.
i=1
j=1
k=1
w=1
for name in files:
    hdulist= fits.open(name)
    hdulist.close()
    if hdulist[0].header['grating']=='B600+_G5307':
        print>>s, name, hdulist[0].header['object']
        if 'flat' in hdulist[0].header['object']:
            x=open('flatb600_'+str(i)+'.txt','w')
            print>>x, name
            x.close()
            i+=1
        elif hdulist[0].header['OBSTYPE']=='OBJECT':
            x=open(str(hdulist[0].header['object'])+'_b600_'+str(j)+'.txt','w')
            b=open('object_b600_'+str(w)+'.txt','w')
            print>>x,name
            print>>b,name
            j+=1
            w+=1
            x.close()
            b.close()
        elif 'Ar' in hdulist[0].header['object']:
            x=open(str(hdulist[0].header['object'])+'_b600_'+str(k)+'.txt','w')
            print>>x,name
            k+=1
            x.close()
f.close()
s.close()
| 25.626506
| 84
| 0.527033
| 315
| 2,127
| 3.495238
| 0.165079
| 0.101726
| 0.17802
| 0.181653
| 0.788374
| 0.742961
| 0.719346
| 0.690282
| 0.690282
| 0.690282
| 0
| 0.066496
| 0.264692
| 2,127
| 83
| 85
| 25.626506
| 0.637468
| 0.121768
| 0
| 0.6875
| 0
| 0
| 0.149758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.046875
| null | null | 0.15625
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3aaca7aa30a7a7b55d1e668e4b2e39c1d421a0bd
| 41
|
py
|
Python
|
tests/__init__.py
|
kajjjak/petstorelib
|
973467c7d90ad70f258d4b192cd085e96db1d12b
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
kajjjak/petstorelib
|
973467c7d90ad70f258d4b192cd085e96db1d12b
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
kajjjak/petstorelib
|
973467c7d90ad70f258d4b192cd085e96db1d12b
|
[
"MIT"
] | null | null | null |
"""Unit test package for petstorelib."""
| 20.5
| 40
| 0.707317
| 5
| 41
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121951
| 41
| 1
| 41
| 41
| 0.805556
| 0.829268
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3ad0e668282ab74b0ff1f5989a3f7da61ce67811
| 105
|
py
|
Python
|
pmfp/entrypoint/docker_/image/__init__.py
|
Python-Tools/pmfp
|
832273890eec08e84f9c68d03f3316b2c8139133
|
[
"MIT"
] | 4
|
2017-09-15T03:38:56.000Z
|
2019-12-16T02:03:14.000Z
|
pmfp/entrypoint/docker_/image/__init__.py
|
Python-Tools/pmfp
|
832273890eec08e84f9c68d03f3316b2c8139133
|
[
"MIT"
] | 1
|
2021-04-27T10:51:42.000Z
|
2021-04-27T10:51:42.000Z
|
pmfp/entrypoint/docker_/image/__init__.py
|
Python-Tools/pmfp
|
832273890eec08e84f9c68d03f3316b2c8139133
|
[
"MIT"
] | null | null | null |
from .new import new_dockerfile
from .build_ import build_dockerimage
from .pack import pack_dockerimage
| 26.25
| 37
| 0.857143
| 15
| 105
| 5.733333
| 0.466667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 105
| 3
| 38
| 35
| 0.924731
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
aaf53dfbd48c322857cc29f35827548d0fd37544
| 174
|
py
|
Python
|
matching/generate_test_profiles.py
|
rastaman/what2017
|
6d134fe87ecdd90a333225822175f003da67fd80
|
[
"MIT"
] | null | null | null |
matching/generate_test_profiles.py
|
rastaman/what2017
|
6d134fe87ecdd90a333225822175f003da67fd80
|
[
"MIT"
] | null | null | null |
matching/generate_test_profiles.py
|
rastaman/what2017
|
6d134fe87ecdd90a333225822175f003da67fd80
|
[
"MIT"
] | null | null | null |
import pickle

import numpy as np

# Generate 100 random 8-dimensional test profiles with values in [0, 100].
# np.random.randint's upper bound is exclusive, hence 101 — this matches the
# deprecated np.random.random_integers(low=0, high=100) the script used before.
test_profiles = np.random.randint(low=0, high=101, size=(100, 8))
print(test_profiles)

# Bug fix: pickle.dump requires an open binary file object, not a path string
# (the original passed 'profiles.dat' directly and would raise a TypeError).
with open('profiles.dat', 'wb') as f:
    pickle.dump(test_profiles, f)
| 17.4
| 73
| 0.770115
| 28
| 174
| 4.642857
| 0.642857
| 0.276923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051948
| 0.114943
| 174
| 9
| 74
| 19.333333
| 0.792208
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.4
| null | null | 0.2
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
aaf950a11023ec1475bf56b81ae1ad5b6d36ed38
| 248
|
py
|
Python
|
mywebsite/home/admin.py
|
nghialvc/Mywebsite
|
50c8a1ae27f53a6c02964113ae1830313d1c3e9a
|
[
"Apache-2.0"
] | null | null | null |
mywebsite/home/admin.py
|
nghialvc/Mywebsite
|
50c8a1ae27f53a6c02964113ae1830313d1c3e9a
|
[
"Apache-2.0"
] | null | null | null |
mywebsite/home/admin.py
|
nghialvc/Mywebsite
|
50c8a1ae27f53a6c02964113ae1830313d1c3e9a
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from . import models

# Register your models here.
# Register every manga-related model with the default admin site.
for _model in (models.MangaType, models.MangaInfo, models.ChapInfo, models.MangaContent):
    admin.site.register(_model)
| 24.8
| 41
| 0.790323
| 32
| 248
| 6.125
| 0.4375
| 0.183673
| 0.346939
| 0.469388
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112903
| 248
| 9
| 42
| 27.555556
| 0.890909
| 0.104839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c90a64b40c6e0b89a2265c03e248a57152246175
| 1,730
|
py
|
Python
|
DomoticzAPI/tests/test_translation.py
|
wini83/Domoticz-API
|
22e1362c652db4474288e912541c01d4cbd42d56
|
[
"MIT"
] | 4
|
2020-06-22T05:13:04.000Z
|
2020-11-05T12:56:48.000Z
|
DomoticzAPI/tests/test_translation.py
|
wini83/Domoticz-API
|
22e1362c652db4474288e912541c01d4cbd42d56
|
[
"MIT"
] | 9
|
2019-02-16T11:52:57.000Z
|
2021-06-24T12:16:50.000Z
|
DomoticzAPI/tests/test_translation.py
|
wini83/Domoticz-API
|
22e1362c652db4474288e912541c01d4cbd42d56
|
[
"MIT"
] | 7
|
2018-08-25T08:12:58.000Z
|
2021-01-22T18:39:11.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Manual smoke test for DomoticzAPI server translation lookups."""
import DomoticzAPI as dom


def _show_key(server, key):
    """Print one translation lookup: key, active language, translated value."""
    print("{} ({}): {}".format(
        key,
        server.translation.language,
        server.translation.value(key)))


def main():
    print("********************************************************************************")
    print("Test script ........... : {}".format(__file__))
    print("********************************************************************************")
    server = dom.Server()
    print("Server language: {}".format(server.language))
    print(server.translation)
    print("Translation language: {}".format(server.translation.language))
    server.translation.language = "nl"
    print("Translation language: {}".format(server.translation.language))
    # Look up a few known keys — and one deliberately unknown one — in Dutch.
    # (The repeated 4-line print blocks of the original are folded into _show_key.)
    for key in ("Hours", "Friday", "Unkown string", "Hurricane"):
        _show_key(server, key)
    # Re-check the last key after switching languages (None = server default).
    for language in ("fr", "de", None):
        server.translation.language = language
        _show_key(server, "Hurricane")


if __name__ == "__main__":
    main()
| 31.454545
| 93
| 0.522543
| 145
| 1,730
| 6.151724
| 0.234483
| 0.400224
| 0.36435
| 0.313901
| 0.738789
| 0.719731
| 0.719731
| 0.596413
| 0.596413
| 0.596413
| 0
| 0.001474
| 0.215607
| 1,730
| 54
| 94
| 32.037037
| 0.655859
| 0.024855
| 0
| 0.666667
| 0
| 0
| 0.224926
| 0.094955
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020833
| false
| 0
| 0.020833
| 0
| 0.041667
| 0.291667
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c96fe465fcc2d9b2a76f52ea99bf288722329c05
| 48
|
py
|
Python
|
brave/exceptions.py
|
niklasR/brave
|
5234bd9cecd6d7282cd0b50acfda44890cf4cfb8
|
[
"Apache-2.0"
] | 1
|
2018-12-04T21:58:27.000Z
|
2018-12-04T21:58:27.000Z
|
brave/exceptions.py
|
niklasR/brave
|
5234bd9cecd6d7282cd0b50acfda44890cf4cfb8
|
[
"Apache-2.0"
] | null | null | null |
brave/exceptions.py
|
niklasR/brave
|
5234bd9cecd6d7282cd0b50acfda44890cf4cfb8
|
[
"Apache-2.0"
] | 1
|
2018-12-07T12:21:08.000Z
|
2018-12-07T12:21:08.000Z
|
class InvalidConfiguration(Exception):
    """Raised when a supplied configuration is not valid."""
| 16
| 38
| 0.791667
| 4
| 48
| 9.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 48
| 2
| 39
| 24
| 0.926829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
a31ef92fecc700993ca87bc9504b7af5bce68e1b
| 54
|
py
|
Python
|
config.py
|
jotajunior/scrapers
|
ea77551a0ea48b2191b2ddbc46c924dfe46cf7ce
|
[
"MIT"
] | null | null | null |
config.py
|
jotajunior/scrapers
|
ea77551a0ea48b2191b2ddbc46c924dfe46cf7ce
|
[
"MIT"
] | null | null | null |
config.py
|
jotajunior/scrapers
|
ea77551a0ea48b2191b2ddbc46c924dfe46cf7ce
|
[
"MIT"
] | null | null | null |
# SECURITY NOTE(review): hard-coded API credential committed to source.
# Treat this key as compromised — rotate it and load it from an environment
# variable or an untracked config file instead of the repository.
RIOT_API_KEY = 'd8c438ee-eeaa-4958-8fc8-7a8fcfcc414e'
| 27
| 53
| 0.814815
| 8
| 54
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.294118
| 0.055556
| 54
| 1
| 54
| 54
| 0.529412
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a3491220151efe802eca3c89b60966509a95e433
| 104
|
py
|
Python
|
pyinstaller/hook-scipy.special.py
|
shmilee/gdpy3
|
2e007851fc87793c0038f7b1dacba729271e17a3
|
[
"MIT"
] | 4
|
2018-08-07T13:28:06.000Z
|
2021-03-08T04:31:20.000Z
|
pyinstaller/hook-scipy.special.py
|
shmilee/gdpy3
|
2e007851fc87793c0038f7b1dacba729271e17a3
|
[
"MIT"
] | null | null | null |
pyinstaller/hook-scipy.special.py
|
shmilee/gdpy3
|
2e007851fc87793c0038f7b1dacba729271e17a3
|
[
"MIT"
] | 3
|
2018-05-05T01:34:33.000Z
|
2022-03-07T15:57:10.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020 shmilee
# PyInstaller hook: declare scipy.special's compiled Cython module as a hidden
# import so the bundler includes it even though static analysis cannot see it.
hiddenimports = ['scipy.special.cython_special']
| 17.333333
| 48
| 0.673077
| 12
| 104
| 5.75
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05618
| 0.144231
| 104
| 5
| 49
| 20.8
| 0.719101
| 0.461538
| 0
| 0
| 0
| 0
| 0.528302
| 0.528302
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6e7ed6bf1925a6ae8942d5090524dd9b982a8766
| 151
|
py
|
Python
|
RoboModel/roboclaw_python/roboclaw_bareminimum.py
|
suchanekj/StarSight
|
636f7965dac582b6e7e6fb9fcf769a83a33acb43
|
[
"MIT"
] | 2
|
2020-02-04T19:10:38.000Z
|
2020-03-24T17:29:24.000Z
|
RoboModel/roboclaw_python/roboclaw_bareminimum.py
|
suchanekj/StarSight
|
636f7965dac582b6e7e6fb9fcf769a83a33acb43
|
[
"MIT"
] | 1
|
2020-02-11T21:07:46.000Z
|
2020-02-11T21:07:46.000Z
|
RoboModel/roboclaw_python/roboclaw_bareminimum.py
|
suchanekj/StarSight
|
636f7965dac582b6e7e6fb9fcf769a83a33acb43
|
[
"MIT"
] | 1
|
2020-02-04T19:07:46.000Z
|
2020-02-04T19:07:46.000Z
|
# Bare-minimum Roboclaw motor-controller bring-up: construct the driver on a
# serial port at 115200 baud and open the connection.
from roboclaw import Roboclaw
#Windows comport name
rc = Roboclaw("COM3",115200)
#Linux comport name
#rc = Roboclaw("/dev/ttyACM0",115200)
# Open the serial link; on Linux, swap the COM3 line for the ttyACM0 one above.
rc.Open()
| 16.777778
| 37
| 0.741722
| 21
| 151
| 5.333333
| 0.619048
| 0.196429
| 0.232143
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106061
| 0.125828
| 151
| 8
| 38
| 18.875
| 0.742424
| 0.490066
| 0
| 0
| 0
| 0
| 0.054054
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6e9ad6573fac6cd4fda476613f4d38576be9191f
| 3,109
|
py
|
Python
|
test/integration/test_routing_functions.py
|
digideskio/dataservices-api
|
246ec135dbeaa3f9a52717fdac50a4ab040ce22b
|
[
"BSD-3-Clause"
] | 22
|
2016-03-11T17:33:31.000Z
|
2021-02-22T04:00:43.000Z
|
test/integration/test_routing_functions.py
|
digideskio/dataservices-api
|
246ec135dbeaa3f9a52717fdac50a4ab040ce22b
|
[
"BSD-3-Clause"
] | 338
|
2016-02-16T16:13:13.000Z
|
2022-03-30T15:50:17.000Z
|
test/integration/test_routing_functions.py
|
CartoDB/dataservices-api
|
d0f28cc002ef11df9f371d5d1fd2d0901c245f97
|
[
"BSD-3-Clause"
] | 14
|
2016-09-22T15:29:33.000Z
|
2021-02-08T03:46:40.000Z
|
from unittest import TestCase
from nose.tools import assert_raises
from nose.tools import assert_not_equal, assert_in
from ..helpers.integration_test_helper import IntegrationTestHelper
class TestRoutingFunctions(TestCase):
    """Integration tests for the cdb_route_* SQL functions via the SQL API."""

    def setUp(self):
        # Build the SQL API endpoint from the integration environment settings.
        self.env_variables = IntegrationTestHelper.get_environment_variables()
        self.sql_api_url = "{0}://{1}.{2}/api/v1/sql".format(
            self.env_variables['schema'],
            self.env_variables['username'],
            self.env_variables['host'],
        )

    def test_if_select_with_routing_point_to_point_is_ok(self):
        query = "SELECT duration, length, shape as the_geom " \
                "FROM cdb_route_point_to_point('POINT(-3.70237112 40.41706163)'::geometry, " \
                "'POINT(-3.69909883 40.41236875)'::geometry, 'car', " \
                "ARRAY['mode_type=shortest']::text[])&api_key={0}".format(
                    self.env_variables['api_key'])
        routing = IntegrationTestHelper.execute_query(self.sql_api_url, query)
        assert_not_equal(routing['the_geom'], None)

    def test_if_select_with_routing_point_to_point_without_api_key_raise_error(self):
        query = "SELECT duration, length, shape as the_geom " \
                "FROM cdb_route_point_to_point('POINT(-3.70237112 40.41706163)'::geometry, " \
                "'POINT(-3.69909883 40.41236875)'::geometry, 'car', " \
                "ARRAY['mode_type=shortest']::text[])"
        try:
            IntegrationTestHelper.execute_query(self.sql_api_url, query)
        except Exception as e:
            # NOTE(review): e.message is Python-2-only; kept as-is for the py2 runtime.
            assert_in(e.message[0], ["Routing permission denied", "function cdb_route_point_to_point(geometry, geometry, unknown, text[]) does not exist"])
        else:
            # Bug fix: previously a successful query silently passed this test.
            self.fail("query without api_key unexpectedly succeeded")

    def test_if_select_with_routing_with_waypoints_is_ok(self):
        query = "SELECT duration, length, shape as the_geom " \
                "FROM cdb_route_with_waypoints(Array['POINT(-3.7109 40.4234)'::GEOMETRY, "\
                "'POINT(-3.7059 40.4203)'::geometry, 'POINT(-3.7046 40.4180)'::geometry]" \
                "::geometry[], 'car', " \
                "ARRAY['mode_type=shortest']::text[])&api_key={0}".format(
                    self.env_variables['api_key'])
        routing = IntegrationTestHelper.execute_query(self.sql_api_url, query)
        assert_not_equal(routing['the_geom'], None)

    def test_if_select_with_routing_with_waypoints_without_api_key_raise_error(self):
        # Bug fix: this "without api_key" test used to append &api_key=... anyway
        # (copy-paste from the _is_ok variant), so it never exercised the
        # permission-denied path.
        query = "SELECT duration, length, shape as the_geom " \
                "FROM cdb_route_with_waypoints(Array['POINT(-3.7109 40.4234)'::geometry, "\
                "'POINT(-3.7059 40.4203)'::geometry, 'POINT(-3.7046 40.4180)'::geometry]" \
                "::geometry[], 'car', " \
                "ARRAY['mode_type=shortest']::text[])"
        try:
            IntegrationTestHelper.execute_query(self.sql_api_url, query)
        except Exception as e:
            assert_in(e.message[0], ["Routing permission denied", "function cdb_route_with_waypoints(geometry, geometry, text, text[]) does not exist"])
        else:
            self.fail("query without api_key unexpectedly succeeded")
| 54.54386
| 155
| 0.642972
| 382
| 3,109
| 4.939791
| 0.235602
| 0.031797
| 0.059353
| 0.034446
| 0.790143
| 0.757287
| 0.757287
| 0.757287
| 0.72973
| 0.703233
| 0
| 0.062604
| 0.224188
| 3,109
| 56
| 156
| 55.517857
| 0.719735
| 0
| 0
| 0.571429
| 0
| 0.040816
| 0.394339
| 0.174976
| 0
| 0
| 0
| 0
| 0.122449
| 1
| 0.102041
| false
| 0
| 0.081633
| 0
| 0.204082
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6eba95e7d27c08c57176b0d155f16202eb154ec1
| 38,904
|
py
|
Python
|
lib/dl/models/factor_graph.py
|
BeautyOfWeb/DeepBio
|
9207357bd3591f67d8e23c7dad217938dcc123ed
|
[
"MIT"
] | 5
|
2019-03-05T14:21:37.000Z
|
2021-04-30T12:25:49.000Z
|
lib/dl/models/factor_graph.py
|
hongqin/DeepBio
|
9207357bd3591f67d8e23c7dad217938dcc123ed
|
[
"MIT"
] | null | null | null |
lib/dl/models/factor_graph.py
|
hongqin/DeepBio
|
9207357bd3591f67d8e23c7dad217938dcc123ed
|
[
"MIT"
] | 2
|
2020-10-26T08:58:34.000Z
|
2021-03-04T21:32:06.000Z
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .dag import StackedDAGLayers
class Factor1d(nn.Module):
    """Similar to masked attention.

    Applies two Linear stacks: first along the trailing "dim" axis, then —
    after a transpose — along the feature axis, optionally masking linear3's
    weight with ``adj_mat``.
    """

    def __init__(self, in_features, in_dim, out_features, out_dim, adj_mat=None, bias=True):
        super(Factor1d, self).__init__()
        # Transforms over the trailing axis (based on intuition, not justified).
        self.linear1 = nn.Linear(in_dim, out_dim, bias)
        self.linear2 = nn.Linear(out_dim, out_dim, bias)
        # Transforms over the feature axis, applied after the transpose below.
        self.linear3 = nn.Linear(in_features, out_features, bias)
        self.linear4 = nn.Linear(out_features, out_features, bias)
        self.adj_mat = adj_mat

    def forward(self, x):
        hidden = self.linear2(F.relu(self.linear1(x)))
        hidden = F.relu(hidden.transpose(1, 2))  # (NxDxC -> NxCxD)
        if self.adj_mat is None:
            projected = self.linear3(hidden)
        else:
            # Mask linear3's weight elementwise with the adjacency matrix.
            projected = F.linear(hidden, self.linear3.weight * self.adj_mat.float(), self.linear3.bias)
        return self.linear4(F.relu(projected)).transpose(1, 2)
class EmbedCell(nn.Module):
    r"""This is a bottleneck layer(s) using 1-D convolution layer(s) with kernel_size = 1
    The goal is to transform vectors in R^in_channels to R^out_channels
    An nn.Conv1d is used to map its corresponding subset of source nodes for each target node
    It is essentially to a linear transformation;
    1-D convolution with kernel_size=1 enables parameter sharing
    Args:
        in_channels: int
        out_channels: int for a single layer or a list/tuple of ints for multiple layers
        use_layer_norm: if True, apply nn.LayerNorm to each instance
        bias: whether or not to use bias in nn.Conv1d
        residual: only used for multiple layers; if True, add skip connections
        duplicate_cell: only used for multiple layers;
            if True, all layers share the same parameters like recurrent neural networks
        nonlinearlity: None, nn.ReLU() or other nonlinearity; apply to output in the middle
        I have NOT figured out how to arrange the LayerNorm and nonlinearity and residual connections
    Shape:
        - Input: N * in_channels * M, where M = the number of input nodes
        - Output: N * out * M, where out = out_channels or out_channels[-1] (multiple layers)
    Attributes:
        weights (and biases) for a nn.Conv1d or a list of nn.Conv1d
    Examples::
        >>> x = torch.randn(2, 3, 5)
        >>> model = EmbedCell(3, [3,3], use_layer_norm=True, bias=True,
            residual=True, duplicate_cell=True, nonlinearity=nn.ReLU())
        >>> y = model(x)
        >>> y.shape, y.mean(1), y.std(1, unbiased=False)
    """
    def __init__(self, in_channels, out_channels, use_layer_norm=True, bias=True,
                 residual=True, duplicate_cell=True, nonlinearity=None):
        super(EmbedCell, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.use_layer_norm = use_layer_norm
        self.bias = bias
        self.residual = residual
        self.duplicate_cell = duplicate_cell
        self.nonlinearity = nonlinearity
        if isinstance(out_channels, int):
            # Normalize the single-layer case to a one-element list.
            out_channels = [out_channels]
            self.out_channels = out_channels
        if isinstance(out_channels, (list, tuple)):
            # Sharing parameters or adding residuals requires every layer to
            # keep the channel count unchanged.
            if len(out_channels)>1 and (duplicate_cell or residual):
                for out in out_channels:
                    assert out == in_channels
            if duplicate_cell:
                # List-multiplication repeats the SAME Conv1d module object,
                # so all layers share one set of parameters (RNN-style).
                self.maps = nn.ModuleList([nn.Conv1d(in_channels, out_channels[0], kernel_size=1, bias=bias)]
                                          * len(out_channels))
            else:
                self.maps = nn.ModuleList([nn.Conv1d(in_channels if i==0 else out_channels[i-1],
                                                     out, kernel_size=1, bias=bias)
                                           for i, out in enumerate(out_channels)])
            if self.use_layer_norm:
                # we can directly use torch.nn.functional.layer_norm in forward function without parameters
                self.layer_norms = nn.ModuleList(
                    [nn.LayerNorm(out, eps=1e-5, elementwise_affine=False)
                     for out in out_channels]
                )
        else:
            raise ValueError(f'out_channels must have type int, list or tuple, but is {type(out_channels)}')
    def forward(self, x):
        # Apply each conv layer in sequence: conv -> (nonlinearity) ->
        # (layer norm over the channel axis) -> (residual), feeding the result
        # into the next layer.
        for i in range(len(self.out_channels)):
            out = self.maps[i](x)
            # Should I put nonlinearity before layer_norm?
            if isinstance(self.nonlinearity, nn.Module):
                out = self.nonlinearity(out)
            if self.use_layer_norm:
                # Transpose so LayerNorm normalizes the channel dimension,
                # then transpose back to N * C * M.
                out = self.layer_norms[i](out.transpose(-1,-2)).transpose(-1,-2)
            if self.residual and i<len(self.out_channels)-1: # no residual in the last layer
                out += x
            x = out
        return out
class GraphConvolution1d(nn.Module):
    r"""Implement modified Graph Convolutional Neural Network
    Provide with options ResNet-like model with stochastic depth
    Fixed graph attention matrices generated from deterministic/random walk on the bipartite graph
    We can use BipartiteGraph1d to implement much of this; but for clarity, write a separate class here
    Args:
        num_features: int
        num_layers: int
        duplicate_layers: if True, all layers will share the same parameters
        dense: if True, connect all previous layers to the current layer
        residual: if True, use skip connections; only used when dense is False
        use_bias: default, False
        use_layer_norm: if True, apply nn.LayerNorm to the output from each layer
        num_cls: if num_cls>=1, then add a classification/regression head on top of the last target layer
            and return the final output
    Shape:
        Input:x is torch.Tensor of size (N, num_features)
            attention_mats can store a list of normalized adjacency matrices from current layers
                to the nodes in previous layers;
            in Graph Convolution Network paper, it only have one fixed first-order adjacency matrix;
            here it is enabled for using multi-scale reception field;
            Let M be the adjacency matrix from source to target (itself)
            attention_mats = [M.T, (M*M).T, (M*M*M).T, ...]
            these transition mats are normalized and transposed
        Output: depending on return_layers: e.g., if return_layers=='all', then return torch.stack(history, dim=-1)
    Examples:
        adj_list = [[3, 4], [5, 6], [5, 4], [6, 4], [3, 6]]
        adj_mat, _ = adj_list_to_mat(adj_list, bipartite=False)
        in_features, out_features = adj_mat.shape
        attention_mats, _ = adj_list_to_attention_mats(adj_list, num_steps=10, bipartite=False)
        model = GraphConvolution1d(num_features=in_features, num_layers=5, duplicate_layers=False,
            dense=False, residual=False, use_bias=True, use_layer_norm=False, nonlinearity=nn.ReLU(),
            num_cls=2, classifier_bias=True)
        x = torch.randn(5, in_features)
        y = model(x, attention_mats, max_num_layers=10, min_num_layers=10,
            return_layers='last-layer')
        y.shape
    """
    def __init__(self, num_features, num_layers, duplicate_layers=False, dense=False, residual=False,
                 use_bias=False, use_layer_norm=False, nonlinearity=nn.ReLU(), num_cls=0, classifier_bias=True):
        super(GraphConvolution1d, self).__init__()
        self.num_features = num_features
        self.num_layers = num_layers
        self.duplicate_layers = duplicate_layers
        self.dense = dense
        self.residual = residual
        self.use_bias = use_bias
        self.use_layer_norm = use_layer_norm
        self.nonlinearity = nonlinearity
        self.num_cls = num_cls
        if self.duplicate_layers:
            # List-multiplication repeats the SAME Parameter object, so every
            # layer shares one weight (and bias) tensor.
            self.weights = nn.ParameterList([nn.Parameter(torch.randn(num_features, num_features),
                                                          requires_grad=True)]*self.num_layers)
            if self.use_bias:
                self.biases = nn.ParameterList([nn.Parameter(torch.randn(num_features),
                                                             requires_grad=True)]*self.num_layers)
        else:
            self.weights = nn.ParameterList([nn.Parameter(torch.randn(num_features, num_features),
                                                          requires_grad=True) for _ in range(self.num_layers)])
            if self.use_bias:
                self.biases = nn.ParameterList([nn.Parameter(torch.randn(num_features),
                                                             requires_grad=True) for _ in range(self.num_layers)])
        if self.use_layer_norm:
            # Parameter-free normalization shared by every layer.
            self.layer_norm = nn.LayerNorm(num_features, eps=1e-05, elementwise_affine=False)
        if self.num_cls >= 1:
            self.classifier = nn.Linear(num_features, num_cls, bias=classifier_bias)
    def forward(self, x, attention_mats, max_num_layers=2, min_num_layers=2, return_layers='last-layer'):
        """
        Args:
            x: 2-D tensor with shape (N, num_features)
            attention_mats: normalized attention matrix with shape (num_features, num_features);
                or a list of attention matrices
        """
        # stochastic depth; num_layers can even be larger than self.num_layers
        num_layers = np.random.randint(min_num_layers, max_num_layers+1)
        history = [x] # the first layer is the original input
        for i in range(1, num_layers):
            if self.dense:
                y = [] # this is for i th layer; if self.dense is True, then connect all previous layers to current layer
                for j in range(i):
                    if isinstance(attention_mats, list):
                        # Layer distance i-j selects the matching multi-step
                        # attention matrix (wrapped by modulo).
                        adj = attention_mats[(i-j-1) % len(attention_mats)]
                    else:
                        adj = attention_mats
                    # if num_layers > len(self.weights), we can reuse the weight by using j % len(self.weights)
                    cur_y = torch.mm(history[j], self.weights[j % len(self.weights)] * adj)
                    if self.use_bias:
                        cur_y = cur_y + self.biases[j % len(self.biases)]
                    y.append(cur_y)
                # Average the contributions from all previous layers.
                cur_y = torch.stack(y, dim=0).mean(dim=0)
            else:
                if isinstance(attention_mats, list):
                    adj = attention_mats[0]
                else:
                    adj = attention_mats
                cur_y = torch.mm(history[i-1], self.weights[(i-1) % len(self.weights)] * adj)
                if self.use_bias:
                    cur_y = cur_y + self.biases[(i-1) % len(self.biases)]
            if isinstance(self.nonlinearity, nn.Module):
                cur_y = self.nonlinearity(cur_y)
            if self.residual:
                cur_y += history[i-1]
            if self.use_layer_norm:
                cur_y = self.layer_norm(cur_y)
            history.append(cur_y)
        if self.num_cls >= 1:
            # Classification head consumes only the final layer's output.
            return self.classifier(history[-1])
        if return_layers == 'last-layer':
            return history[-1]
        elif return_layers == 'all-but-first':
            # excluding the original input
            return torch.stack(history[1:], dim=-1)
        elif return_layers == 'all':
            return torch.stack(history, dim=-1)
class BipartiteGraph1d(nn.Module):
    r"""Encode a bipartite graph into the model architecture;
    ResNet-like model with stochastic depth.
    Fixed graph attention matrices generated from deterministic/random walk on the bipartite graph.
    Args:
        in_features: int, number of source nodes
        out_features: int, number of target nodes
        nonlinearity: applied to each layer's aggregated output when it is an nn.Module
        use_layer_norm: if True, apply nn.LayerNorm to the output from each layer
        num_cls: if num_cls>=1, then add a classification/regression head on top of the last target layer
            and return the final output
        classifier_bias: bias flag for the optional classifier head
    Shape:
        Input: x is torch.Tensor of size (N, in_features)
        attention_mats = [source_attention_mats, target_attention_mats];
        source_attention_mats stores attention mats from source to the nodes in previous layers;
        target_attention_mats stores attention mats from target to the nodes in previous layers;
        Let Ms be the adjacency matrix from source to target, and Mt from target to source
        source_attention_mats = [Ms.T, (Ms*Mt).T, (Ms*Mt*Ms).T, ...],
        target_attention_mats = [Mt.T, (Mt*Ms).T, (Mt*Ms*Mt).T, ...];
        source_attention_mats stores transition mat from source with 1,2,... steps,
        target_attention_mats stores transition mat from target with 1,2,... steps,
        these transition mats are normalized and transposed
        NOTE(review): because of the alternating walk, entry shapes inside each list alternate
        with index parity (e.g. attention_mats[0][0] is (out_features, in_features) while
        attention_mats[0][1] is (in_features, in_features)) — confirm against adj_list_to_attention_mats.
    Examples:
        adj_list = [[3, 4], [5, 6], [5, 4], [6, 4], [3, 6]]
        adj_mat, _ = adj_list_to_mat(adj_list, bipartite=True)
        in_features, out_features = adj_mat.shape
        attention_mats, _ = adj_list_to_attention_mats(adj_list, num_steps=10, bipartite=True)
        model = BipartiteGraph1d(in_features=in_features, out_features=out_features,
                                 use_layer_norm=True)
        x = torch.randn(5, in_features)
        y = model(x, attention_mats, max_num_layers=10, min_num_layers=10,
                  return_layers='last-two')
        y[0].shape, y[1].shape
    """
    def __init__(self, in_features, out_features, nonlinearity=nn.ReLU(), use_layer_norm=True, num_cls=0,
                 classifier_bias=True):
        super(BipartiteGraph1d, self).__init__()
        # learnable elementwise gates multiplied onto the fixed attention matrices in forward()
        self.source_to_target = nn.Parameter(torch.randn(in_features, out_features),
                                             requires_grad=True)
        self.target_to_source = nn.Parameter(torch.randn(out_features, in_features),
                                             requires_grad=True)
        self.nonlinearity = nonlinearity
        self.use_layer_norm = use_layer_norm
        if self.use_layer_norm:
            # elementwise_affine=False: pure normalization, no learnable scale/shift
            self.layer_norm_source = nn.LayerNorm(in_features, eps=1e-05, elementwise_affine=False)
            self.layer_norm_target = nn.LayerNorm(out_features, eps=1e-05, elementwise_affine=False)
        self.num_cls = num_cls
        if self.num_cls >= 1:
            self.classifier = nn.Linear(out_features, num_cls, bias=classifier_bias)
    def forward(self, x, attention_mats, max_num_layers=2, min_num_layers=2, return_layers='last-two'):
        # stochastic depth: the depth is resampled uniformly on every forward pass
        num_layers = np.random.randint(min_num_layers, max_num_layers+1)
        if num_layers % 2 != 0:
            # make sure num_layers is even so that the last two layers are source and target
            num_layers += 1
        history = [x]  # the first layer is the original input (source)
        # convention: history[k] is source features (N, in_features) for even k,
        # target features (N, out_features) for odd k
        for i in range(1, num_layers):
            y = []  # this is for i th layer; if i is even, then it is source; otherwise target
            for j in range(i):
                if i%2 == 0 and j%2 == 0:  # both i and j are sources
                    # # if attention_mats is too big, we may only store two of them,
                    # # thus disabling multi-scale long-range interaction;
                    # # this is why (i-j-1) % len(attention_mats[0]) instead of i-j-1 is used
                    # # to avoid size mismatch:
                    # assert len(attention_mats[0]) % 2 == 0 and len(attention_mats[1]) % 2 == 0
                    y.append(torch.mm(history[j], attention_mats[0][(i-j-1) % len(attention_mats[0])]))
                elif i%2 == 0 and j%2 != 0:  # j is target, i is source
                    # the learnable gate target_to_source elementwise-scales the fixed attention mat
                    y.append(torch.mm(history[j],
                                      self.target_to_source * attention_mats[0][(i-j-1) % len(attention_mats[0])]))
                elif i%2 != 0 and j%2 == 0:  # j is source, i is target
                    y.append(torch.mm(history[j],
                                      self.source_to_target * attention_mats[1][(i-j-1) % len(attention_mats[1])]))
                else:  # both i and j are targets
                    y.append(torch.mm(history[j], attention_mats[1][(i-j-1) % len(attention_mats[1])]))
            # dense aggregation: average the contributions from all previous layers
            y = torch.stack(y, dim=0).mean(dim=0)
            if isinstance(self.nonlinearity, nn.Module):
                y = self.nonlinearity(y)
            if self.use_layer_norm:
                if i%2 == 0:  # even numbers are source
                    y = self.layer_norm_source(y)
                else:  # odd numbers are target
                    y = self.layer_norm_target(y)
            history.append(y)
        if self.num_cls >= 1:
            # num_layers is forced even, so history[-1] is always a target layer
            return self.classifier(history[-1])
        if return_layers == 'last-target':
            return history[-1]
        elif return_layers == 'last-two':
            return history[-2:]
        elif return_layers == 'all-source-target':
            # source.size() = (N, in_features, num_layers/2)
            source = torch.stack([history[i] for i in range(num_layers) if i%2==0], dim=-1)
            # target.size() = (N, out_features, num_layers/2)
            target = torch.stack([history[i] for i in range(num_layers) if i%2!=0], dim=-1)
            return source, target
        elif return_layers == 'all':
            return history
        # NOTE(review): an unrecognized return_layers falls through and returns None implicitly
class BipartiteGraph(nn.Module):
    r"""Encode a bipartite graph into the model architecture;
    ResNet-like model with stochastic depth.
    Fixed graph attention matrices generated from deterministic/random walk on the bipartite graph.
    Unlike BipartiteGraph1d, every node carries a feature vector (in_dim/out_dim), so the
    learnable mixing weights are rank-4 tensors.
    Args:
        in_features: int, number of source nodes
        out_features: int, number of target nodes
        in_dim: int, per-node feature dimension on the source side
        out_dim: int, per-node feature dimension on the target side
        use_layer_norm: if True, apply nn.LayerNorm to the output from each layer
    Shape:
        Input: x is torch.Tensor of size (N, in_dim, in_features)
        attention_mats = [source_attention_mats, target_attention_mats];
        source_attention_mats stores attention mats from source to the nodes in previous layers;
        target_attention_mats stores attention mats from target to the nodes in previous layers;
        Let Mt be the adjacency matrix from source to target, and Ms from target to source;
        in the obsolete version:
        source_attention_mats = [Ms, Mt*Ms, Ms*Mt*Ms, ...],
        target_attention_mats = [Mt, Ms*Mt, Mt*Ms*Mt, ...];
        source_attention_mats are to reach source with 1,2,... steps,
        target_attention_mats are to reach target
        in the CURRENT version:
        source_attention_mats = [Mt.T, (Mt*Ms).T, (Mt*Ms*Mt).T, ...],
        target_attention_mats = [Ms.T, (Ms*Mt).T, (Ms*Mt*Ms).T, ...];
        source_attention_mats stores transition mat from source with 1,2,... steps,
        target_attention_mats stores transition mat from target with 1,2,... steps,
        these transition mats are transposed
    Examples:
        adj_list = [[3, 4], [5, 6], [5, 4], [6, 4], [3, 6]]
        attention_mats, _ = adj_list_to_attention_mats(adj_list, num_steps=10, bipartite=True,
                                                       use_transition_matrix=True)
        model = BipartiteGraph(in_features, out_features, in_dim=5, out_dim=11,
                               use_layer_norm=True)
        x = torch.randn(7, 5, 3)
        y = model(x, attention_mats, max_num_layers=10, min_num_layers=8,
                  return_layers='last-two')
        y[0].shape, y[1].shape
    """
    def __init__(self, in_features, out_features, in_dim, out_dim, use_layer_norm=True):
        super(BipartiteGraph, self).__init__()
        # rank-4 mixing tensors: map (node, feature) pairs across the bipartition
        self.source_to_target = nn.Parameter(torch.randn(in_dim, in_features, out_features, out_dim),
                                             requires_grad=True)
        self.target_to_source = nn.Parameter(torch.randn(out_dim, out_features, in_features, in_dim),
                                             requires_grad=True)
        self.use_layer_norm = use_layer_norm
        if self.use_layer_norm:
            # normalize jointly over the last two dims (dim, features); no learnable affine params
            self.layer_norm_source = nn.LayerNorm([in_dim, in_features], eps=1e-05, elementwise_affine=False)
            self.layer_norm_target = nn.LayerNorm([out_dim, out_features], eps=1e-05, elementwise_affine=False)
    def forward(self, x, attention_mats, max_num_layers=2, min_num_layers=2,
                return_layers='last-two'):
        # stochastic depth: the depth is resampled uniformly on every forward pass
        num_layers = np.random.randint(min_num_layers, max_num_layers+1)
        if num_layers % 2 != 0:
            # make sure num_layers is even so that the last two layers are source and target
            num_layers += 1
        history = [x]  # the first layer is the original input (source)
        # convention: history[k] is (N, in_dim, in_features) for even k, (N, out_dim, out_features) for odd k
        for i in range(1, num_layers):
            y = []  # this is for i th layer; if i is even, then it is source; otherwise target
            for j in range(i):
                if i%2 == 0 and j%2 == 0:  # both i and j are sources
                    # # if attention_mats is too big, we may only store two of them, or four, six, eight, ..., of them,
                    # # thus disabling multi-scale long-range interaction and saves memory;
                    # # this is why (i-j-1) % len(attention_mats[0]) instead of i-j-1 is used
                    # # to avoid size mismatch:
                    # assert len(attention_mats[0]) % 2 == 0 and len(attention_mats[1]) % 2 == 0
                    y.append(torch.matmul(history[j], attention_mats[0][(i-j-1) % len(attention_mats[0])]))
                elif i%2 == 0 and j%2 != 0:  # j is target, i is source
                    # attn (out_features, in_features) broadcasts into the rank-4 gate; the double sum
                    # contracts out_dim and out_features, yielding (N, in_features, in_dim), then
                    # transpose gives the source layout (N, in_dim, in_features)
                    weight = self.target_to_source * attention_mats[0][(i-j-1) % len(attention_mats[0])].unsqueeze(-1)
                    new_y = (history[j].unsqueeze(-1).unsqueeze(-1) * weight).sum(dim=1).sum(dim=1).transpose(1,2)
                    y.append(new_y)
                elif i%2 != 0 and j%2 == 0:  # j is source, i is target
                    # symmetric contraction in the source->target direction
                    weight = self.source_to_target * attention_mats[1][(i-j-1) % len(attention_mats[1])].unsqueeze(-1)
                    new_y = (history[j].unsqueeze(-1).unsqueeze(-1) * weight).sum(dim=1).sum(dim=1).transpose(1,2)
                    y.append(new_y)
                else:  # both i and j are targets
                    y.append(torch.matmul(history[j], attention_mats[1][(i-j-1) % len(attention_mats[1])]))
            # dense aggregation: average the contributions from all previous layers
            y = torch.stack(y, dim=0).mean(dim=0)
            if self.use_layer_norm:
                if i%2 == 0:  # even numbers are source
                    y = self.layer_norm_source(y)
                else:  # odd numbers are target
                    y = self.layer_norm_target(y)
            history.append(y)
        if return_layers == 'last-target':
            return history[-1]
        elif return_layers == 'last-two':
            return history[-2:]
        elif return_layers == 'all-source-target':
            # source.size() = (N, in_dim, in_features, num_layers/2)
            source = torch.stack([history[i] for i in range(num_layers) if i%2==0], dim=-1)
            # target.size() = (N, out_dim, out_features, num_layers/2)
            target = torch.stack([history[i] for i in range(num_layers) if i%2!=0], dim=-1)
            return source, target
        elif return_layers == 'all':
            return history
        # NOTE(review): an unrecognized return_layers falls through and returns None implicitly
class GeneNet(nn.Module):
    r"""Gene-Pathway(GO) network: gene0->gene1->pathway0->pathway1->gene0->...
    Args:
        num_genes: int
        num_pathways: int
        attention_mats: if provided, it should be a dictionary with keys:
            'gene1->gene0': a list of the attention mats from genes to genes;
                the computation is from gene0->gene1
            'pathway0->gene1': a list of the attention mats from pathways to genes;
                the computation is from gene1->pathway0
            'pathway1->pathway0': a list of the attention mats from pathways to pathways;
                the computation is from pathway0->pathway1
            'gene0->pathway1': a list of the attention mats from genes to pathways;
                the computation is from pathway1->gene0
        dense: if True, add skip connections from all previous layers to current layer
        use_dag_layer: if True and dag is provided, pass pathway1 activations through StackedDAGLayers
        dag: DAG spec consumed by StackedDAGLayers (project-defined)
        dag_in_channel_list: channel list forwarded to StackedDAGLayers; defaults to [1]
        dag_kwargs: extra keyword args forwarded to StackedDAGLayers; defaults to {}
        nonlinearity: if provided as nn.Module, then apply it to output
        use_layer_norm: if True, apply layer_norm to output
            (nonlinearity is applied before layer norm)
        num_cls: if num_cls>=1, then add a classifier or regression head using the
            pathway1-last-layer output as input; otherwise do nothing
        classifier_bias: bias flag for the optional classifier head
    Shape:
        Input: x: (N, num_genes)
        attention_mats: see class doc
        Output: if return_layers=='all', return a 4-tuple ('gene0', 'gene1', 'pathway0',
            'pathway1'), each of shape (N, num_genes/pathways, num_layers)
    Examples:
        attention_mats = {}
        num_steps = 10
        num_genes = 23
        num_pathways = 11
        name_to_id_gene = {i: i for i in range(num_genes)}
        p = 0.4
        gene_gene_mat = np.random.uniform(0, 1, (num_genes, num_genes))
        gene_gene_list = np.array(np.where(gene_gene_mat < p)).T
        attention_mats['gene1->gene0'], id_to_name_gene = adj_list_to_attention_mats(
            gene_gene_list, num_steps=num_steps, name_to_id=name_to_id_gene, bipartite=False,
            add_self_loop=True, symmetric=True, target_to_source=None, use_transition_matrix=True,
            softmax_normalization=False, min_value=-100, device=torch.device('cpu'))
        pathway_pathway_list = np.array([[1, 2], [3, 2], [1, 3], [2, 4], [5,3], [1, 5], [2, 6], [5,2]])
        name_to_id_pathway, _ = get_topological_order(pathway_pathway_list,
                                                      edge_direction='left->right')
        for i in range(num_pathways):
            if i not in name_to_id_pathway:
                name_to_id_pathway[i] = len(name_to_id_pathway)
        dag = collections.defaultdict(list)
        for s in pathway_pathway_list:
            left = name_to_id_pathway[s[0]]
            right = name_to_id_pathway[s[1]]
            dag[right].append(left)
        dag = {k: sorted(set(v)) for k, v in dag.items()}
        attention_mats['pathway1->pathway0'], id_to_name_pathway = adj_list_to_attention_mats(
            pathway_pathway_list, num_steps=num_steps, name_to_id=name_to_id_pathway, bipartite=False,
            add_self_loop=False, symmetric=False, target_to_source=None, use_transition_matrix=True,
            softmax_normalization=False, min_value=-100, device=torch.device('cpu'))
        gene_pathway_mat = np.random.uniform(0, 1, (num_genes, num_pathways))
        gene_pathway_list = np.array(np.where(gene_pathway_mat < p)).T
        mats, _ = adj_list_to_attention_mats(
            gene_pathway_list, num_steps=num_steps*2, name_to_id=[name_to_id_gene, name_to_id_pathway],
            bipartite=True, add_self_loop=False, symmetric=False, target_to_source=None,
            use_transition_matrix=True, softmax_normalization=False, min_value=-100,
            device=torch.device('cpu'))
        # this is very tricky:
        # the even positions are all gene->pathway in mats[0], while odd ones gene->gene
        attention_mats['gene0->pathway1'] = [m for i, m in enumerate(mats[0]) if i%2==0]
        attention_mats['pathway0->gene1'] = [m for i, m in enumerate(mats[1]) if i%2==0]
        model = GeneNet(num_genes, num_pathways, attention_mats=None, dense=True, use_dag_layer=True,
                        dag=dag, dag_in_channel_list=[1,1,1],
                        dag_kwargs={'residual':True, 'duplicate_dag':True}, nonlinearity=nn.ReLU(),
                        use_layer_norm=True)
        x = torch.randn(5, num_genes)
        y = model(x, attention_mats=attention_mats, max_num_layers=num_steps, min_num_layers=num_steps,
                  return_layers='all')
        y[0].shape, y[1].shape, y[2].shape, y[3].shape
    """
    def __init__(self, num_genes, num_pathways, attention_mats=None, dense=True, use_dag_layer=False,
                 dag=None, dag_in_channel_list=None, dag_kwargs=None, nonlinearity=nn.ReLU(), use_layer_norm=True,
                 num_cls=0, classifier_bias=True):
        super(GeneNet, self).__init__()
        # avoid the shared-mutable-default pitfall: materialize per-call defaults here
        if dag_in_channel_list is None:
            dag_in_channel_list = [1]
        if dag_kwargs is None:
            dag_kwargs = {}
        # learnable elementwise gates multiplied onto the fixed attention mats in forward_one_layer
        self.weights = nn.ParameterDict()
        self.weights['gene0->gene1'] = nn.Parameter(torch.randn(num_genes, num_genes))
        self.weights['gene1->pathway0'] = nn.Parameter(torch.randn(num_genes, num_pathways))
        self.weights['pathway0->pathway1'] = nn.Parameter(torch.randn(num_pathways, num_pathways))
        self.weights['pathway1->gene0'] = nn.Parameter(torch.randn(num_pathways, num_genes))
        self.dense = dense
        self.nonlinearity = nonlinearity
        self.use_layer_norm = use_layer_norm
        self.attention_mats = attention_mats
        self.use_dag_layer = use_dag_layer and dag is not None
        if self.use_dag_layer:
            self.dag_layers = StackedDAGLayers(dag=dag, in_channels_list=dag_in_channel_list, **dag_kwargs)
        self.num_cls = num_cls
        if num_cls >= 1:
            self.classifier = nn.Linear(num_pathways, num_cls, bias=classifier_bias)
    def _reduce(self, outputs):
        """Combine skip-connection outputs: mean over contributions when dense, else the newest one."""
        if self.dense:
            return torch.stack(outputs, dim=-1).mean(dim=-1)
        return outputs[-1]
    def forward_one_layer(self, history, attention_mats, in_name, out_name, i, j):
        """Propagate the i-th stored activation of `in_name` one hop to `out_name`.

        `j` indexes the attention-mat list modulo its length so we can forward more
        steps than there are stored attention mats.
        """
        len_attention_mats = len(attention_mats[f'{out_name}->{in_name}'])
        x = torch.mm(history[in_name][i],
                     self.weights[f'{in_name}->{out_name}'] * attention_mats[f'{out_name}->{in_name}'][j % len_attention_mats])
        if isinstance(self.nonlinearity, nn.Module):
            x = self.nonlinearity(x)
        if self.use_layer_norm:
            # normalize over the feature dimension; no learnable affine parameters
            x = nn.functional.layer_norm(x, (x.size(-1),), weight=None, bias=None, eps=1e-5)
        return x
    def forward(self, x, attention_mats=None, max_num_layers=2, min_num_layers=2,
                return_layers='pathway1-last-layer'):
        """
        Args:
            x: (N, num_genes)
            attention_mats: see class doc; if provided here use this instead of self.attention_mats
            max_num_layers: inclusive upper bound for the sampled depth (stochastic depth)
            min_num_layers: inclusive lower bound for the sampled depth
            return_layers: which activations to return; ignored when self.num_cls >= 1,
                in which case the classification score matrix is returned instead
        Raises:
            ValueError: if return_layers is not a supported option (and num_cls < 1)
        """
        if attention_mats is None:
            assert self.attention_mats is not None
            attention_mats = self.attention_mats
        # stochastic depth: resample the number of macro-layers on every call
        num_layers = np.random.randint(min_num_layers, max_num_layers+1)
        history = {'gene0': [x], 'gene1': [], 'pathway0': [], 'pathway1': []}
        for l in range(num_layers):
            # dense: aggregate from all previous depths; otherwise only from the current one
            start = 0 if self.dense else l
            gene1 = [self.forward_one_layer(history, attention_mats, 'gene0', 'gene1', j, l-j)
                     for j in range(start, l+1)]
            history['gene1'].append(self._reduce(gene1))
            pathway0 = [self.forward_one_layer(history, attention_mats, 'gene1', 'pathway0', j, l-j)
                        for j in range(start, l+1)]
            history['pathway0'].append(self._reduce(pathway0))
            pathway1 = []
            for j in range(start, l+1):
                h = self.forward_one_layer(history, attention_mats, 'pathway0', 'pathway1', j, l-j)
                if self.use_dag_layer:
                    h = self.dag_layers(h)
                pathway1.append(h)
            history['pathway1'].append(self._reduce(pathway1))
            if l < num_layers-1:
                # no gene0 update after the final macro-layer (its output would be unused)
                gene0 = [self.forward_one_layer(history, attention_mats, 'pathway1', 'gene0', j, l-j)
                         for j in range(start, l+1)]
                history['gene0'].append(self._reduce(gene0))
        if self.num_cls >= 1:
            return self.classifier(history['pathway1'][-1])
        if return_layers == 'all':
            return (torch.stack(history['gene0'], dim=-1), torch.stack(history['gene1'], dim=-1),
                    torch.stack(history['pathway0'], dim=-1), torch.stack(history['pathway1'], dim=-1))
        if return_layers == 'pathway1-all':
            return torch.stack(history['pathway1'], dim=-1)
        if return_layers == 'pathway1-last-layer':
            return history['pathway1'][-1]
        if return_layers == 'gene1-all':
            return torch.stack(history['gene1'], dim=-1)
        if return_layers == 'gene1-last-layer':
            return history['gene1'][-1]
        # previously this fell through and silently returned None
        raise ValueError(f'unrecognized return_layers: {return_layers!r}')
class PathNet(nn.Module):
    r"""Gene-Pathway(GO) network: gene->pathway0->pathway1->gene->...

    Identical in spirit to GeneNet except that no gene-gene interaction data is
    available, so the gene0/gene1 pair collapses into a single 'gene' node set.

    Args:
        num_genes: int
        num_pathways: int
        attention_mats: optional dict with keys:
            'pathway0->gene': attention mats from pathways to genes (used for gene->pathway0)
            'pathway1->pathway0': attention mats from pathways to pathways (used for pathway0->pathway1)
            'gene->pathway1': attention mats from genes to pathways (used for pathway1->gene)
        dense: if True, every layer receives skip connections from all previous layers
        use_dag_layer: if True and dag is given, run pathway1 activations through StackedDAGLayers
        dag: DAG spec for StackedDAGLayers (project-defined)
        dag_in_channel_list: channel list handed to StackedDAGLayers
        dag_kwargs: extra kwargs handed to StackedDAGLayers
        nonlinearity: applied to each layer output when it is an nn.Module (before layer norm)
        use_layer_norm: if True, layer-normalize each layer output
        num_cls: if num_cls >= 1, a classifier/regression head consumes the last pathway1 output
        classifier_bias: bias flag for that head
    Shape:
        Input x: (N, num_genes); output depends on return_layers, e.g. 'all' returns a
        3-tuple of ('gene', 'pathway0', 'pathway1') stacks of shape (N, nodes, num_layers).
    """
    def __init__(self, num_genes, num_pathways, attention_mats=None, dense=True, use_dag_layer=False,
                 dag=None, dag_in_channel_list=[1], dag_kwargs={}, nonlinearity=nn.ReLU(), use_layer_norm=True,
                 num_cls=0, classifier_bias=True):
        super(PathNet, self).__init__()
        # learnable gates multiplied elementwise onto the fixed attention matrices
        self.weights = nn.ParameterDict({
            'gene->pathway0': nn.Parameter(torch.randn(num_genes, num_pathways)),
            'pathway0->pathway1': nn.Parameter(torch.randn(num_pathways, num_pathways)),
            'pathway1->gene': nn.Parameter(torch.randn(num_pathways, num_genes)),
        })
        self.dense = dense
        self.nonlinearity = nonlinearity
        self.use_layer_norm = use_layer_norm
        self.attention_mats = attention_mats
        self.use_dag_layer = use_dag_layer and dag is not None
        if self.use_dag_layer:
            self.dag_layers = StackedDAGLayers(dag=dag, in_channels_list=dag_in_channel_list, **dag_kwargs)
        self.num_cls = num_cls
        if num_cls >= 1:
            self.classifier = nn.Linear(num_pathways, num_cls, bias=classifier_bias)
    def _merge(self, outputs):
        # dense mode averages every skip-connection contribution; otherwise keep the newest only
        if self.dense:
            return torch.stack(outputs, dim=-1).mean(dim=-1)
        return outputs[-1]
    def forward_one_layer(self, history, attention_mats, in_name, out_name, i, j):
        """Propagate the i-th stored `in_name` activation one hop to `out_name`.

        `j` wraps around the attention-mat list so forwarding can run more steps
        than there are stored attention mats.
        """
        attn_list = attention_mats[f'{out_name}->{in_name}']
        gated = self.weights[f'{in_name}->{out_name}'] * attn_list[j % len(attn_list)]
        out = torch.mm(history[in_name][i], gated)
        if isinstance(self.nonlinearity, nn.Module):
            out = self.nonlinearity(out)
        if self.use_layer_norm:
            # plain normalization over the feature dim, no learnable affine parameters
            out = nn.functional.layer_norm(out, (out.size(-1),), weight=None, bias=None, eps=1e-5)
        return out
    def forward(self, x, attention_mats=None, max_num_layers=2, min_num_layers=2,
                return_layers='pathway1-last-layer'):
        """
        Args:
            x: (N, num_genes)
            attention_mats: see class doc; overrides self.attention_mats when given
            max_num_layers: inclusive upper bound for the sampled depth (stochastic depth)
            min_num_layers: inclusive lower bound for the sampled depth
            return_layers: selects the returned activations; ignored when self.num_cls >= 1,
                in which case the classification score matrix is returned instead
        """
        if attention_mats is None:
            assert self.attention_mats is not None
            attention_mats = self.attention_mats
        # stochastic depth: resample the number of macro-layers per call
        num_layers = np.random.randint(min_num_layers, max_num_layers + 1)
        history = {'gene': [x], 'pathway0': [], 'pathway1': []}
        for depth in range(num_layers):
            start = 0 if self.dense else depth
            steps = range(start, depth + 1)
            history['pathway0'].append(self._merge(
                [self.forward_one_layer(history, attention_mats, 'gene', 'pathway0', s, depth - s)
                 for s in steps]))
            pathway1 = []
            for s in steps:
                h = self.forward_one_layer(history, attention_mats, 'pathway0', 'pathway1', s, depth - s)
                if self.use_dag_layer:
                    h = self.dag_layers(h)
                pathway1.append(h)
            history['pathway1'].append(self._merge(pathway1))
            if depth < num_layers - 1:
                # the gene update after the last macro-layer would be unused, so skip it
                history['gene'].append(self._merge(
                    [self.forward_one_layer(history, attention_mats, 'pathway1', 'gene', s, depth - s)
                     for s in steps]))
        if self.num_cls >= 1:
            return self.classifier(history['pathway1'][-1])
        if return_layers == 'all':
            return (torch.stack(history['gene'], dim=-1),
                    torch.stack(history['pathway0'], dim=-1),
                    torch.stack(history['pathway1'], dim=-1))
        if return_layers == 'pathway1-all':
            return torch.stack(history['pathway1'], dim=-1)
        if return_layers == 'pathway1-last-layer':
            return history['pathway1'][-1]
        if return_layers == 'gene-all':
            return torch.stack(history['gene'], dim=-1)
        if return_layers == 'gene-last-layer':
            return history['gene'][-1]
| 47.852399
| 116
| 0.661654
| 5,786
| 38,904
| 4.251988
| 0.066886
| 0.076091
| 0.019511
| 0.010975
| 0.801886
| 0.771929
| 0.747297
| 0.725144
| 0.696041
| 0.673644
| 0
| 0.020391
| 0.224758
| 38,904
| 813
| 117
| 47.852399
| 0.795325
| 0.486634
| 0
| 0.633495
| 0
| 0
| 0.04866
| 0.006571
| 0
| 0
| 0
| 0
| 0.007282
| 1
| 0.038835
| false
| 0
| 0.012136
| 0
| 0.140777
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6ecaa5dc6c5b205204f392197c52e534c57e86a8
| 131
|
py
|
Python
|
update_workplace_pub_l2.py
|
jessevp07/lcoc-ldevs
|
c2dac1b17618fe50c3298aa3d915975a5740812a
|
[
"BSD-3-Clause"
] | 1
|
2022-01-23T20:20:07.000Z
|
2022-01-23T20:20:07.000Z
|
update_workplace_pub_l2.py
|
jessevp07/lcoc-ldevs
|
c2dac1b17618fe50c3298aa3d915975a5740812a
|
[
"BSD-3-Clause"
] | null | null | null |
update_workplace_pub_l2.py
|
jessevp07/lcoc-ldevs
|
c2dac1b17618fe50c3298aa3d915975a5740812a
|
[
"BSD-3-Clause"
] | 1
|
2021-12-17T15:12:00.000Z
|
2021-12-17T15:12:00.000Z
|
import lcoc.processing as proc
#calculate levelized cost of charging (state-level)
proc.calculate_state_workplace_public_l2_lcoc()
| 32.75
| 51
| 0.854962
| 19
| 131
| 5.631579
| 0.789474
| 0.242991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008333
| 0.083969
| 131
| 4
| 52
| 32.75
| 0.883333
| 0.381679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
42e08e58e16694c8704572893a5f6c3b1e3d352b
| 19
|
py
|
Python
|
django-server/tweets/management/commands/__init__.py
|
KilledByNLP/twitter-unpoptefy-server
|
f51665322c4e7934fac52f560a5d965d55c31914
|
[
"MIT"
] | null | null | null |
django-server/tweets/management/commands/__init__.py
|
KilledByNLP/twitter-unpoptefy-server
|
f51665322c4e7934fac52f560a5d965d55c31914
|
[
"MIT"
] | null | null | null |
django-server/tweets/management/commands/__init__.py
|
KilledByNLP/twitter-unpoptefy-server
|
f51665322c4e7934fac52f560a5d965d55c31914
|
[
"MIT"
] | 1
|
2021-01-24T02:39:44.000Z
|
2021-01-24T02:39:44.000Z
|
from . import train
| 19
| 19
| 0.789474
| 3
| 19
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 19
| 1
| 19
| 19
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
42f59168e52cd062679d4a77b2197a095094dafc
| 235
|
py
|
Python
|
securityheaders/checkers/expectct/__init__.py
|
th3cyb3rc0p/securityheaders
|
941264be581dc01afe28f6416f2d7bed79aecfb3
|
[
"Apache-2.0"
] | 151
|
2018-07-29T22:34:43.000Z
|
2022-03-22T05:08:27.000Z
|
securityheaders/checkers/expectct/__init__.py
|
th3cyb3rc0p/securityheaders
|
941264be581dc01afe28f6416f2d7bed79aecfb3
|
[
"Apache-2.0"
] | 5
|
2019-04-24T07:31:36.000Z
|
2021-04-15T14:31:23.000Z
|
securityheaders/checkers/expectct/__init__.py
|
th3cyb3rc0p/securityheaders
|
941264be581dc01afe28f6416f2d7bed79aecfb3
|
[
"Apache-2.0"
] | 42
|
2018-07-31T08:18:59.000Z
|
2022-03-28T08:18:32.000Z
|
from .checker import ExpectCTChecker
from .httpreporturi import ExpectCTHTTPReportURIChecker
from .notenforce import ExpectCTNotEnforcedChecker
__all__ = ['ExpectCTChecker','ExpectCTHTTPReportURIChecker','ExpectCTNotEnforcedChecker']
| 39.166667
| 89
| 0.87234
| 16
| 235
| 12.5625
| 0.5625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068085
| 235
| 5
| 90
| 47
| 0.917808
| 0
| 0
| 0
| 0
| 0
| 0.293617
| 0.229787
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6e44d5c8410654b25125dc74f00c01700a26da92
| 56
|
py
|
Python
|
pip_audit/__init__.py
|
di/pip-audit
|
ab25d5225a9a42e268598211fa425e53d34e8355
|
[
"Apache-2.0"
] | 1
|
2022-01-24T12:06:03.000Z
|
2022-01-24T12:06:03.000Z
|
pip_audit/__init__.py
|
di/pip-audit
|
ab25d5225a9a42e268598211fa425e53d34e8355
|
[
"Apache-2.0"
] | null | null | null |
pip_audit/__init__.py
|
di/pip-audit
|
ab25d5225a9a42e268598211fa425e53d34e8355
|
[
"Apache-2.0"
] | null | null | null |
from pip_audit.version import __version__ # noqa: F401
| 28
| 55
| 0.803571
| 8
| 56
| 5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.142857
| 56
| 1
| 56
| 56
| 0.770833
| 0.178571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
2822314ed3c64406c63698020f888d76ba73be5d
| 218
|
py
|
Python
|
desafios/desafio#21.py
|
thiagocanabarro/PythonProjects
|
3b5cfff137d9d94b5fa0f0da9fe3ae6825b82269
|
[
"MIT"
] | null | null | null |
desafios/desafio#21.py
|
thiagocanabarro/PythonProjects
|
3b5cfff137d9d94b5fa0f0da9fe3ae6825b82269
|
[
"MIT"
] | null | null | null |
desafios/desafio#21.py
|
thiagocanabarro/PythonProjects
|
3b5cfff137d9d94b5fa0f0da9fe3ae6825b82269
|
[
"MIT"
] | null | null | null |
# Faça um programa em Python que abra e reproduza o áudio de um arquivo mp3
import pygame
pygame.mixer.init()
pygame.mixer.music.load('leguas.mp3')
pygame.mixer.music.play()
while(pygame.mixer.music.get_busy()): pass
| 27.25
| 75
| 0.770642
| 37
| 218
| 4.513514
| 0.702703
| 0.263473
| 0.287425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010309
| 0.110092
| 218
| 8
| 76
| 27.25
| 0.850515
| 0.334862
| 0
| 0
| 0
| 0
| 0.069444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
2897c9bd0d9863bddd8e9facbf684ceb6ca01458
| 5,793
|
py
|
Python
|
apybiomart/tests/conftest.py
|
robertopreste/apybiomart
|
a1e65c4a3431ad0ba92dfddc62e25dd51e860832
|
[
"MIT"
] | 4
|
2019-07-19T05:52:19.000Z
|
2021-11-05T10:32:41.000Z
|
apybiomart/tests/conftest.py
|
robertopreste/apybiomart
|
a1e65c4a3431ad0ba92dfddc62e25dd51e860832
|
[
"MIT"
] | 48
|
2019-04-01T14:46:58.000Z
|
2022-03-07T17:51:17.000Z
|
apybiomart/tests/conftest.py
|
robertopreste/apybiomart
|
a1e65c4a3431ad0ba92dfddc62e25dd51e860832
|
[
"MIT"
] | 2
|
2020-11-05T03:32:12.000Z
|
2020-11-21T06:21:35.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Created by Roberto Preste
import os
import pandas as pd
import pytest
DATADIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data")
# marts
@pytest.fixture
def df_marts() -> pd.DataFrame:
"""Dataframe with available marts from Biomart."""
df = pd.read_pickle(os.path.join(DATADIR, "marts.pkl"))
return df
# datasets
@pytest.fixture
def df_datasets_ensembl() -> pd.DataFrame:
"""Dataframe with available datasets for the default mart
(ENSEMBL_MART_ENSEMBL)."""
df = pd.read_pickle(os.path.join(DATADIR, "datasets_ensembl.pkl"))
return df
@pytest.fixture
def df_datasets_mouse() -> pd.DataFrame:
"""Dataframe with available datasets for the ENSEMBL_MART_MOUSE mart."""
df = pd.read_pickle(os.path.join(DATADIR, "datasets_mouse.pkl"))
return df
@pytest.fixture
def df_datasets_sequence() -> pd.DataFrame:
"""Dataframe with available datasets for the ENSEMBL_MART_SEQUENCE mart."""
df = pd.read_pickle(os.path.join(DATADIR, "datasets_sequence.pkl"))
return df
@pytest.fixture
def df_datasets_ontology() -> pd.DataFrame:
"""Dataframe with available datasets for the ENSEMBL_MART_ONTOLOGY mart."""
df = pd.read_pickle(os.path.join(DATADIR, "datasets_ontology.pkl"))
return df
@pytest.fixture
def df_datasets_genomic() -> pd.DataFrame:
"""Dataframe with available datasets for the ENSEMBL_MART_GENOMIC mart."""
df = pd.read_pickle(os.path.join(DATADIR, "datasets_genomic.pkl"))
return df
@pytest.fixture
def df_datasets_snp() -> pd.DataFrame:
"""Dataframe with available datasets for the ENSEMBL_MART_SNP mart."""
df = pd.read_pickle(os.path.join(DATADIR, "datasets_snp.pkl"))
return df
@pytest.fixture
def df_datasets_funcgen() -> pd.DataFrame:
"""Dataframe with available datasets for the ENSEMBL_MART_FUNCGEN mart."""
df = pd.read_pickle(os.path.join(DATADIR, "datasets_funcgen.pkl"))
return df
# attributes
@pytest.fixture
def df_attributes_ensembl_hsapiens_gene() -> pd.DataFrame:
"""Dataframe with available attributes for the hsapiens_gene_ensembl
dataset."""
df = pd.read_pickle(os.path.join(DATADIR,
"attributes_hsapiens_gene_ensembl.pkl"))
return df
@pytest.fixture
def df_attributes_ontology_closure_eco() -> pd.DataFrame:
"""Dataframe with available attributes for the closure_ECO dataset."""
df = pd.read_pickle(os.path.join(DATADIR,
"attributes_closure_ECO.pkl"))
return df
@pytest.fixture
def df_attributes_genomic_hsapiens_encode() -> pd.DataFrame:
    """Available attributes for the hsapiens_encode dataset (cached pickle)."""
    return pd.read_pickle(
        os.path.join(DATADIR, "attributes_hsapiens_encode.pkl"))
@pytest.fixture
def df_attributes_snp_chircus_snp() -> pd.DataFrame:
    """Available attributes for the chircus_snp dataset (cached pickle)."""
    return pd.read_pickle(
        os.path.join(DATADIR, "attributes_chircus_snp.pkl"))
@pytest.fixture
def df_attributes_funcgen_hsapiens_peak() -> pd.DataFrame:
    """Available attributes for the hsapiens_peak dataset (cached pickle)."""
    return pd.read_pickle(
        os.path.join(DATADIR, "attributes_hsapiens_peak.pkl"))
# filters
@pytest.fixture
def df_filters_ensembl_hsapiens_gene() -> pd.DataFrame:
    """Available filters for the hsapiens_gene_ensembl dataset
    (cached pickle)."""
    return pd.read_pickle(
        os.path.join(DATADIR, "filters_hsapiens_gene_ensembl.pkl"))
@pytest.fixture
def df_filters_ontology_closure_eco() -> pd.DataFrame:
    """Available filters for the closure_ECO dataset (cached pickle)."""
    return pd.read_pickle(
        os.path.join(DATADIR, "filters_closure_ECO.pkl"))
@pytest.fixture
def df_filters_genomic_hsapiens_encode() -> pd.DataFrame:
    """Available filters for the hsapiens_encode dataset (cached pickle)."""
    return pd.read_pickle(
        os.path.join(DATADIR, "filters_hsapiens_encode.pkl"))
@pytest.fixture
def df_filters_snp_chircus_snp() -> pd.DataFrame:
    """Available filters for the chircus_snp dataset (cached pickle)."""
    return pd.read_pickle(
        os.path.join(DATADIR, "filters_chircus_snp.pkl"))
@pytest.fixture
def df_filters_funcgen_hsapiens_peak() -> pd.DataFrame:
    """Available filters for the hsapiens_peak dataset (cached pickle)."""
    return pd.read_pickle(
        os.path.join(DATADIR, "filters_hsapiens_peak.pkl"))
# query
@pytest.fixture
def df_query_ensembl_hsapiens_gene_chrom_1() -> pd.DataFrame:
    """Expected query result for chromosome 1 of the hsapiens_gene_ensembl
    dataset (cached pickle)."""
    return pd.read_pickle(
        os.path.join(DATADIR, "query_hsapiens_gene_chrom_1.pkl"))
@pytest.fixture
def df_query_ensembl_hsapiens_gene_chrom_2() -> pd.DataFrame:
    """Expected query result for chromosome 2 of the hsapiens_gene_ensembl
    dataset (cached pickle)."""
    return pd.read_pickle(
        os.path.join(DATADIR, "query_hsapiens_gene_chrom_2.pkl"))
@pytest.fixture
def df_query_ensembl_hsapiens_gene_chrom_3() -> pd.DataFrame:
    """Expected query result for chromosome 3 of the hsapiens_gene_ensembl
    dataset (cached pickle)."""
    return pd.read_pickle(
        os.path.join(DATADIR, "query_hsapiens_gene_chrom_3.pkl"))
| 31.145161
| 79
| 0.679268
| 742
| 5,793
| 5.072776
| 0.091644
| 0.038257
| 0.058448
| 0.100425
| 0.89373
| 0.856004
| 0.855207
| 0.855207
| 0.662859
| 0.513815
| 0
| 0.002203
| 0.216296
| 5,793
| 185
| 80
| 31.313514
| 0.826872
| 0.275678
| 0
| 0.544554
| 0
| 0
| 0.127864
| 0.101503
| 0
| 0
| 0
| 0
| 0
| 1
| 0.207921
| false
| 0
| 0.029703
| 0
| 0.445545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
956b1bb9b396417b84dcba00c273240786a49f11
| 161
|
py
|
Python
|
mmdet/datasets/car_pedestrian.py
|
andrey1908/mmdetection
|
9920e6d1c76cede66776f45aefa2517b9ba5c41c
|
[
"Apache-2.0"
] | null | null | null |
mmdet/datasets/car_pedestrian.py
|
andrey1908/mmdetection
|
9920e6d1c76cede66776f45aefa2517b9ba5c41c
|
[
"Apache-2.0"
] | null | null | null |
mmdet/datasets/car_pedestrian.py
|
andrey1908/mmdetection
|
9920e6d1c76cede66776f45aefa2517b9ba5c41c
|
[
"Apache-2.0"
] | null | null | null |
from .coco import CocoDataset
from .builder import DATASETS
# Register with mmdetection's DATASETS registry so configs can refer to
# this dataset by its class name.
@DATASETS.register_module
class Car_Pedestrian(CocoDataset):
    """COCO-format dataset restricted to two categories: car and pedestrian."""
    CLASSES = ('car', 'pedestrian')
| 16.1
| 35
| 0.763975
| 18
| 161
| 6.722222
| 0.666667
| 0.214876
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 161
| 9
| 36
| 17.888889
| 0.876812
| 0
| 0
| 0
| 0
| 0
| 0.08125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
957b98f2643b1654978db71e88ee52d2674d9e1d
| 8,895
|
py
|
Python
|
poplar/dataset/tests/test_interactions.py
|
mortonjt/poplar
|
854d1ef819392f54536df386ef034091831802ed
|
[
"BSD-3-Clause"
] | null | null | null |
poplar/dataset/tests/test_interactions.py
|
mortonjt/poplar
|
854d1ef819392f54536df386ef034091831802ed
|
[
"BSD-3-Clause"
] | null | null | null |
poplar/dataset/tests/test_interactions.py
|
mortonjt/poplar
|
854d1ef819392f54536df386ef034091831802ed
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import numpy as np
from poplar.util import get_data_path
import pandas as pd
from Bio import SeqIO
from poplar.dataset.interactions import (
InteractionDataset, ValidationDataset,
parse, preprocess,
clean, dictionary,
NegativeSampler)
class TestPreprocess(unittest.TestCase):
    """Tests for the `preprocess` helper."""

    def setUp(self):
        self.links_file = get_data_path('links.txt')
        self.fasta_file = get_data_path('prots.fa')

    def test_preprocess(self):
        """preprocess() should yield a 100x2 pair table for the fixtures."""
        records = list(SeqIO.parse(self.fasta_file, format='fasta'))
        link_table = pd.read_table(self.links_file, header=None)
        cleaned = [clean(record) for record in records]
        lookup = {record.id: record for record in cleaned}
        pair_table = preprocess(lookup, link_table)
        self.assertListEqual(list(pair_table.shape), [100, 2])
class TestInteractionDataset(unittest.TestCase):
    """Tests for InteractionDataset sampling and iteration.

    NOTE(review): the expected sequences below pin a specific seeded RNG
    draw order, so the statement order here must not change.
    """
    def setUp(self):
        # Shared fixtures: raw sequences plus a preprocessed pair table.
        self.links_file = get_data_path('links.txt')
        self.fasta_file = get_data_path('prots.fa')
        self.seqs = list(SeqIO.parse(self.fasta_file, format='fasta'))
        links = pd.read_table(self.links_file, header=None)
        truncseqs = list(map(clean, self.seqs))
        seqids = list(map(lambda x: x.id, truncseqs))
        seqdict = dict(zip(seqids, truncseqs))
        self.pairs = preprocess(seqdict, links)
    def test_sort(self):
        # TODO: not implemented yet.
        pass
    def test_random_peptide(self):
        """random_peptide() should return a sequence from the database."""
        # Test the random_peptide function
        # to make sure that peptides are sampled
        # uniformly from the database
        np.random.seed(0)
        sampler = NegativeSampler(self.seqs)
        intsd = InteractionDataset(self.pairs, sampler)
        res = intsd.random_peptide()
        seqset = list(map(clean, self.seqs))
        seqset = set(map(lambda x: x.seq, seqset))
        self.assertIn(res, seqset)
    def test_getitem(self):
        """Indexing yields a (gene, positive, negative) sequence triple.

        The expected strings were captured from a run seeded with 1.
        """
        np.random.seed(1)
        sampler = NegativeSampler(self.seqs)
        intsd = InteractionDataset(self.pairs, sampler)
        gene, pos, neg = intsd[0]
        exp_gene = list(
            'MINEIKKEAQERMGKTLEALGHAFAKIRTGRAHPSILDSVMVSYYGADTPLRQVANVTV'
            'EDSRTLALAVFDKSMIQAVEKAIMTSDLGLNPATAGTTIRVPMPALTEETRKGYTKQAR'
            'AEAEQARVSVRNIRRDALAQLKDLQKEKEISEDEERRAGDDVQKLTDKFIGEIEKALEA'
            'KEADLMAV'
        )
        exp_pos = list(
            'MMRSHYCGQLNESLDGQEVTLCGWVHRRRDHGGVIFLDVRDREGLAQVVFDPDRAETFA'
            'KADRVRSEFVVKITGKVRLRPEGARNPNMASGSIEVLGYELEVLNQAETPPFPLDEYSD'
            'VGEETRLRYRFIDLRRPEMAAKLKLRARITSSIRRYLDDNGFLDVETPILGRPTPEGAR'
            'DYLVPSRTYPGHFFALPQSPQLFKQLLMVAGFDRYYQIAKCFRDEDLRADRQPEFTQID'
            'IETSFLDESDIIGITEKMVRQLFKEVLDVEFDEFPHMPFEEAMRRYGSDKPDLRIPLEL'
            'VDVADQLKEVEFKVFSGPANDPKGRVAALRVPGAASMPRSQIDDYTKFVGIYGAKGLAY'
            'IKVNERAKGVEGLQSPIVKFIPEANLNVILDRVGAVDGDIVFFGADKAKIVCDALGALR'
            'IKVGHDLKLLTREWAPMWVVDFPMFEENDDGSLSALHHPFTSPKCTPAELEANPGAALS'
            'RAYDMVLNGTELGGGSIRIHDKSMQQAVFRVLGIDEAEQEEKFGFLLDALKYGAPPHGG'
            'LAFGLDRLVMLMTGASSIREVIAFPKTQSAGDVMTQAPGSVDGKALRELHIRLREQPKAE'
        )
        exp_neg = list(
            'MILELDCGNSLIKWRVIEGAARSVAGGLAESDDALVEQLTSQQALPVRACRLVSVRSEQ'
            'ETSQLVARLEQLFPVSALVASSGKQLAGVRNGYLDYQRLGLDRWLALVAAHHLAKKACL'
            'VIDLGTAVTSDLVAADGVHLGGYICPGMTLMRSQLRTHTRRIRYDDAEARRALASLQPG'
            'QATAEAVERGCLLMLRGFVREQYAMACELLGPDCEIFLTGGDAELVRDELAGARIMPDL'
            'VFVGLALACPIE'
        )
        self.assertListEqual(list(gene), exp_gene)
        self.assertListEqual(list(pos), exp_pos)
        self.assertListEqual(list(neg), exp_neg)
    def test_iter(self):
        """Iteration yields num_neg samples per interaction pair."""
        # Test the iter function to make sure
        # negative samples are being drawn
        np.random.seed(0)
        sampler = NegativeSampler(self.seqs)
        intsd = InteractionDataset(self.pairs, sampler)
        res = [r for r in intsd]
        self.assertEqual(len(res), self.pairs.shape[0] * intsd.num_neg)
class TestValidationDataset(unittest.TestCase):
    """Tests for ValidationDataset indexing and ordering.

    NOTE(review): the expected sequences below pin a specific seeded RNG
    draw order, so the statement order here must not change.
    """
    def setUp(self):
        # Shared fixtures: raw sequences, links table, and pair table.
        self.links_file = get_data_path('links.txt')
        self.fasta_file = get_data_path('prots.fa')
        self.seqs = list(SeqIO.parse(self.fasta_file, format='fasta'))
        self.links = pd.read_table(self.links_file, header=None)
        truncseqs = list(map(clean, self.seqs))
        seqids = list(map(lambda x: x.id, truncseqs))
        seqdict = dict(zip(seqids, truncseqs))
        self.pairs = preprocess(seqdict, self.links)
    def test_getitem(self):
        """Indexing yields (gene, positive, random, protein id, taxon).

        The expected strings were captured from a run seeded with 0.
        """
        np.random.seed(0)
        sampler = NegativeSampler(self.seqs)
        intsd = ValidationDataset(self.pairs, self.links, sampler)
        gene, pos, rnd, protid, taxa = intsd[0]
        exp_gene = list(
            'MINEIKKEAQERMGKTLEALGHAFAKIRTGRAHPSILDSVMVSYYGADTPLRQVANVTV'
            'EDSRTLALAVFDKSMIQAVEKAIMTSDLGLNPATAGTTIRVPMPALTEETRKGYTKQAR'
            'AEAEQARVSVRNIRRDALAQLKDLQKEKEISEDEERRAGDDVQKLTDKFIGEIEKALEA'
            'KEADLMAV'
        )
        exp_pos = list(
            'MMRSHYCGQLNESLDGQEVTLCGWVHRRRDHGGVIFLDVRDREGLAQVVFDPDRAETFA'
            'KADRVRSEFVVKITGKVRLRPEGARNPNMASGSIEVLGYELEVLNQAETPPFPLDEYSD'
            'VGEETRLRYRFIDLRRPEMAAKLKLRARITSSIRRYLDDNGFLDVETPILGRPTPEGAR'
            'DYLVPSRTYPGHFFALPQSPQLFKQLLMVAGFDRYYQIAKCFRDEDLRADRQPEFTQID'
            'IETSFLDESDIIGITEKMVRQLFKEVLDVEFDEFPHMPFEEAMRRYGSDKPDLRIPLEL'
            'VDVADQLKEVEFKVFSGPANDPKGRVAALRVPGAASMPRSQIDDYTKFVGIYGAKGLAY'
            'IKVNERAKGVEGLQSPIVKFIPEANLNVILDRVGAVDGDIVFFGADKAKIVCDALGALR'
            'IKVGHDLKLLTREWAPMWVVDFPMFEENDDGSLSALHHPFTSPKCTPAELEANPGAALS'
            'RAYDMVLNGTELGGGSIRIHDKSMQQAVFRVLGIDEAEQEEKFGFLLDALKYGAPPHGG'
            'LAFGLDRLVMLMTGASSIREVIAFPKTQSAGDVMTQAPGSVDGKALRELHIRLREQPKAE'
        )
        exp_rnd = list(
            'MINEIKKEAQERMGKTLEALGHAFAKIRTGRAHPSILDSVMVSYYGADTPLRQVANVTV'
            'EDSRTLALAVFDKSMIQAVEKAIMTSDLGLNPATAGTTIRVPMPALTEETRKGYTKQAR'
            'AEAEQARVSVRNIRRDALAQLKDLQKEKEISEDEERRAGDDVQKLTDKFIGEIEKALEA'
            'KEADLMAV'
        )
        self.assertListEqual(list(gene), exp_gene)
        self.assertListEqual(list(pos), exp_pos)
        self.assertListEqual(list(rnd), exp_rnd)
        self.assertEqual(protid, '287.DR97_4286')
        self.assertEqual(taxa, 287)
    def test_iter(self):
        """Iteration yields num_neg samples per pair, in sorted id order."""
        # Test the iter function to make sure
        # negative samples are being drawn
        np.random.seed(0)
        sampler = NegativeSampler(self.seqs)
        intsd = ValidationDataset(self.pairs, self.links, sampler)
        res = [r for r in intsd]
        self.assertEqual(len(res), self.pairs.shape[0] * intsd.num_neg)
        gene, pos, rnd, idx, taxa = list(zip(*res))
        ids = list(zip(idx, taxa))
        # make sure that if sorted, the list will be in the same order
        sorted_idx = sorted(ids, key=lambda x: (x[0], x[1]))
        self.assertListEqual(sorted_idx, ids)
class TestParse(unittest.TestCase):
    """Tests for the top-level `parse` entry point."""

    def test_parse_links(self):
        """parse() returns train/test/valid loaders with expected lengths."""
        # Make sure that a validate dataloader is added
        batch_size = 1
        self.links_file = get_data_path('links.txt')
        self.fasta_file = get_data_path('prots.fa')
        loaders = parse(self.fasta_file, self.links_file,
                        training_column=4,
                        batch_size=batch_size,
                        num_workers=1, arm_the_gpu=False)
        self.assertEqual(len(loaders), 3)
        train, test, valid = loaders
        # Consume each loader to prove it iterates cleanly.
        for _batch in train:
            pass
        self.assertEqual(len(train), 83)
        for _batch in test:
            pass
        self.assertEqual(len(test), 12)
        for _batch in valid:
            pass
        self.assertEqual(len(valid), 5)

    def test_parse_positive(self):
        """A positive-only links file yields only a validation loader."""
        batch_size = 1
        self.links_file = get_data_path('positive.txt')
        self.fasta_file = get_data_path('prots.fa')
        loaders = parse(self.fasta_file, self.links_file,
                        training_column=4,
                        batch_size=batch_size,
                        num_workers=1, arm_the_gpu=False)
        self.assertEqual(len(loaders), 3)
        train, test, valid = loaders
        self.assertIsNone(train)
        self.assertIsNone(test)
        self.assertIsNotNone(valid)
        self.assertEqual(len(valid), 2)

    def test_parse_negative(self):
        """A negative-only links file yields test and validation loaders."""
        batch_size = 1
        self.links_file = get_data_path('negative.txt')
        self.fasta_file = get_data_path('prots.fa')
        loaders = parse(self.fasta_file, self.links_file,
                        training_column=4,
                        batch_size=batch_size,
                        num_workers=1, arm_the_gpu=False)
        self.assertEqual(len(loaders), 3)
        train, test, valid = loaders
        self.assertIsNone(train)
        self.assertIsNotNone(test)
        self.assertIsNotNone(valid)
        self.assertEqual(len(valid), 2)
# Allow the suite to run directly: `python test_interactions.py`.
if __name__ == "__main__":
    unittest.main()
| 37.531646
| 74
| 0.659584
| 850
| 8,895
| 6.767059
| 0.194118
| 0.025035
| 0.024861
| 0.031293
| 0.773644
| 0.726878
| 0.726878
| 0.713317
| 0.700104
| 0.68637
| 0
| 0.009044
| 0.254188
| 8,895
| 236
| 75
| 37.690678
| 0.858004
| 0.043845
| 0
| 0.629032
| 0
| 0
| 0.250648
| 0.229456
| 0
| 0
| 0
| 0
| 0.145161
| 1
| 0.069892
| false
| 0.005376
| 0.032258
| 0
| 0.123656
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
250e9aa11fd5ab6fee5f166cc9bb45e4ae61ff5f
| 228
|
py
|
Python
|
MicroPython_RTL8722/ports/rtl8722/mp_scripts/boot.py
|
yodaliu/ambd_micropython
|
d4ba4ba137cd6f68aaa64e6577f0359e2b4d09d4
|
[
"MIT"
] | 11
|
2020-10-11T15:12:48.000Z
|
2022-02-28T01:46:07.000Z
|
MicroPython_RTL8722/ports/rtl8722/mp_scripts/boot.py
|
yodaliu/ambd_micropython
|
d4ba4ba137cd6f68aaa64e6577f0359e2b4d09d4
|
[
"MIT"
] | 9
|
2020-10-09T07:34:17.000Z
|
2021-08-30T12:06:22.000Z
|
MicroPython_RTL8722/ports/rtl8722/mp_scripts/boot.py
|
yodaliu/ambd_micropython
|
d4ba4ba137cd6f68aaa64e6577f0359e2b4d09d4
|
[
"MIT"
] | 10
|
2020-09-29T03:11:28.000Z
|
2022-01-24T02:42:39.000Z
|
# MicroPython boot script for the Ameba RTL8722 port: pre-imports the
# board's builtin modules so they are ready in the REPL / main script.
print()
# NOTE(review): machine, wireless, modules, sdfs, and SOCK are
# firmware-provided builtins on this board, not CPython modules.
import machine, wireless ,time, modules, socket, sdfs
from wireless import WLAN
from machine import Pin, UART, Timer, RTC, PWM, I2C, SPI, ADC
from socket import SOCK
print("[MP]: Imported all builtin libraries")
print()
| 28.5
| 61
| 0.754386
| 34
| 228
| 5.058824
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005128
| 0.144737
| 228
| 7
| 62
| 32.571429
| 0.876923
| 0
| 0
| 0.285714
| 0
| 0
| 0.157895
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.714286
| 0
| 0.714286
| 0.428571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 5
|
2527233c7898226c62d6a6bab101677aed72e166
| 37
|
py
|
Python
|
colorpicker/__init__.py
|
bleck9999/pyqt-colorpicker
|
d8289dd50202b19a10d0b4a363d7fb19e18feb6c
|
[
"MIT"
] | 15
|
2020-10-30T20:02:40.000Z
|
2021-12-27T13:09:01.000Z
|
colorpicker/__init__.py
|
bleck9999/pyqt-colorpicker
|
d8289dd50202b19a10d0b4a363d7fb19e18feb6c
|
[
"MIT"
] | 1
|
2020-10-27T20:08:56.000Z
|
2020-11-02T16:41:24.000Z
|
colorpicker/__init__.py
|
bleck9999/pyqt-colorpicker
|
d8289dd50202b19a10d0b4a363d7fb19e18feb6c
|
[
"MIT"
] | 5
|
2021-01-04T20:12:35.000Z
|
2021-11-29T09:26:02.000Z
|
from .colorpicker import ColorPicker
| 18.5
| 36
| 0.864865
| 4
| 37
| 8
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
254c2eb4a5e0d314148068cab172ba1a2b398f22
| 40
|
py
|
Python
|
opinion/opinion/error.py
|
benlevyx/opinion-vs-fact
|
5063adc16e37b0b47cb6b55494866c31133281d4
|
[
"MIT"
] | null | null | null |
opinion/opinion/error.py
|
benlevyx/opinion-vs-fact
|
5063adc16e37b0b47cb6b55494866c31133281d4
|
[
"MIT"
] | 1
|
2021-06-02T00:59:17.000Z
|
2021-06-02T00:59:17.000Z
|
opinion/opinion/error.py
|
benlevyx/opinion-vs-fact
|
5063adc16e37b0b47cb6b55494866c31133281d4
|
[
"MIT"
] | null | null | null |
class ArticleError(Exception):
    """Base exception for article-processing failures.

    NOTE(review): intentionally empty; callers raise/catch it as a marker.
    """
    pass
| 13.333333
| 30
| 0.75
| 4
| 40
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175
| 40
| 2
| 31
| 20
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
255f8cec5885680247753309d6193c976fd209b4
| 83
|
py
|
Python
|
run_etl.py
|
carlps/bikeshare
|
cb20a74a4428686b7e91121b1b03c0bb77f4176c
|
[
"MIT"
] | 2
|
2019-05-19T12:00:23.000Z
|
2019-05-21T16:06:35.000Z
|
run_etl.py
|
yassmhd/bikeshare
|
cb20a74a4428686b7e91121b1b03c0bb77f4176c
|
[
"MIT"
] | 1
|
2018-05-14T14:51:33.000Z
|
2018-05-14T14:51:33.000Z
|
run_etl.py
|
yassmhd/bikeshare
|
cb20a74a4428686b7e91121b1b03c0bb77f4176c
|
[
"MIT"
] | 1
|
2019-05-31T02:27:24.000Z
|
2019-05-31T02:27:24.000Z
|
from src import bikeshare_etl
# Script entry point: run the bikeshare ETL pipeline.
if __name__ == '__main__':
    bikeshare_etl.main()
| 16.6
| 29
| 0.73494
| 11
| 83
| 4.636364
| 0.727273
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168675
| 83
| 4
| 30
| 20.75
| 0.73913
| 0
| 0
| 0
| 0
| 0
| 0.096386
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c253882e95b1777fe2c97e89ccb889cadf1ecab9
| 340
|
py
|
Python
|
basicmonitor/actions/__init__.py
|
TorbenFricke/basicmonitor
|
c636f2d7efc80831008c23fa4b6030b3183d505f
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
basicmonitor/actions/__init__.py
|
TorbenFricke/basicmonitor
|
c636f2d7efc80831008c23fa4b6030b3183d505f
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
basicmonitor/actions/__init__.py
|
TorbenFricke/basicmonitor
|
c636f2d7efc80831008c23fa4b6030b3183d505f
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
from basicmonitor.actions.base import Action, DebugAction
from basicmonitor.actions.manager import ActionManager
from basicmonitor.actions.pushover import PushoverAction
from basicmonitor.actions.webhook import WebhookAction
from basicmonitor.actions.reboot import RebootAction
# Names of every registered Action subclass.  Iterating a dict already
# yields its keys, so the explicit .keys() call was redundant.
actions_available = list(Action.subclasses_by_name())
| 42.5
| 60
| 0.867647
| 39
| 340
| 7.487179
| 0.538462
| 0.273973
| 0.393836
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073529
| 340
| 8
| 60
| 42.5
| 0.926984
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.833333
| 0
| 0.833333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c282c31945c4720e728970986dcf1b942e2d8161
| 39
|
py
|
Python
|
tests/components/version/__init__.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/version/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
tests/components/version/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Tests for the version component."""
| 19.5
| 38
| 0.692308
| 5
| 39
| 5.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128205
| 39
| 1
| 39
| 39
| 0.794118
| 0.820513
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c2e2c0f225567e229e6137adf672cd8f86b1361f
| 75
|
py
|
Python
|
hello.py
|
Kunal3Kumar/Assignment
|
80bca7da69e372016c6b0faee745d133205c4bf6
|
[
"MIT"
] | 1
|
2021-08-13T10:19:32.000Z
|
2021-08-13T10:19:32.000Z
|
hello.py
|
Kunal3Kumar/Assignment
|
80bca7da69e372016c6b0faee745d133205c4bf6
|
[
"MIT"
] | null | null | null |
hello.py
|
Kunal3Kumar/Assignment
|
80bca7da69e372016c6b0faee745d133205c4bf6
|
[
"MIT"
] | null | null | null |
# Write a program to print your name.
print("Hello Kunal Pandey !!!!!")
| 25
| 39
| 0.653333
| 11
| 75
| 4.454545
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 75
| 3
| 40
| 25
| 0.816667
| 0.466667
| 0
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
c2e8e1a365756a7559c8a1c9e3564fcb3f5a60d3
| 59
|
py
|
Python
|
picorss/src/infrastructure/models/__init__.py
|
rok-povsic/picorss
|
7c182953868e56389d5c080f3c0b75d7c0fafa74
|
[
"MIT"
] | null | null | null |
picorss/src/infrastructure/models/__init__.py
|
rok-povsic/picorss
|
7c182953868e56389d5c080f3c0b75d7c0fafa74
|
[
"MIT"
] | null | null | null |
picorss/src/infrastructure/models/__init__.py
|
rok-povsic/picorss
|
7c182953868e56389d5c080f3c0b75d7c0fafa74
|
[
"MIT"
] | null | null | null |
from picorss.src.infrastructure.models.page import RssPage
| 29.5
| 58
| 0.864407
| 8
| 59
| 6.375
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067797
| 59
| 1
| 59
| 59
| 0.927273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6c31d52e25fdcb62ea7a0efcd6e30537202c20e5
| 11
|
py
|
Python
|
data/studio21_generated/introductory/4503/starter_code.py
|
vijaykumawat256/Prompt-Summarization
|
614f5911e2acd2933440d909de2b4f86653dc214
|
[
"Apache-2.0"
] | null | null | null |
data/studio21_generated/introductory/4503/starter_code.py
|
vijaykumawat256/Prompt-Summarization
|
614f5911e2acd2933440d909de2b4f86653dc214
|
[
"Apache-2.0"
] | null | null | null |
data/studio21_generated/introductory/4503/starter_code.py
|
vijaykumawat256/Prompt-Summarization
|
614f5911e2acd2933440d909de2b4f86653dc214
|
[
"Apache-2.0"
] | null | null | null |
def f(n):
| 5.5
| 9
| 0.454545
| 3
| 11
| 1.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 11
| 2
| 10
| 5.5
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6c4995a01f12a925ff49ac1a1e931682d0c69c90
| 1,351
|
py
|
Python
|
build/sensor_actuator/cmake/sensor_actuator-genmsg-context.py
|
kaiodt/kaio_ros_ws
|
d9ee0edb97d16cf2a0a6074fecd049db7367a032
|
[
"BSD-2-Clause"
] | null | null | null |
build/sensor_actuator/cmake/sensor_actuator-genmsg-context.py
|
kaiodt/kaio_ros_ws
|
d9ee0edb97d16cf2a0a6074fecd049db7367a032
|
[
"BSD-2-Clause"
] | null | null | null |
build/sensor_actuator/cmake/sensor_actuator-genmsg-context.py
|
kaiodt/kaio_ros_ws
|
d9ee0edb97d16cf2a0a6074fecd049db7367a032
|
[
"BSD-2-Clause"
] | null | null | null |
# generated from genmsg/cmake/pkg-genmsg.context.in
# NOTE(review): this file is auto-generated by the catkin/genmsg build.
# Do not edit by hand -- rebuild the workspace to regenerate it.
# Semicolon-separated list of .msg files generated for this package.
messages_str = "/home/kaiodt/kaio_ros_ws/devel/share/sensor_actuator/msg/RotationAction.msg;/home/kaiodt/kaio_ros_ws/devel/share/sensor_actuator/msg/RotationActionGoal.msg;/home/kaiodt/kaio_ros_ws/devel/share/sensor_actuator/msg/RotationActionResult.msg;/home/kaiodt/kaio_ros_ws/devel/share/sensor_actuator/msg/RotationActionFeedback.msg;/home/kaiodt/kaio_ros_ws/devel/share/sensor_actuator/msg/RotationGoal.msg;/home/kaiodt/kaio_ros_ws/devel/share/sensor_actuator/msg/RotationResult.msg;/home/kaiodt/kaio_ros_ws/devel/share/sensor_actuator/msg/RotationFeedback.msg"
# Semicolon-separated list of .srv files for this package.
services_str = "/home/kaiodt/kaio_ros_ws/src/sensor_actuator/srv/FakeSensor.srv;/home/kaiodt/kaio_ros_ws/src/sensor_actuator/srv/Light.srv"
pkg_name = "sensor_actuator"
dependencies_str = "std_msgs;geometry_msgs;actionlib_msgs"
# Message-generation languages enabled for this workspace.
langs = "gencpp;geneus;genlisp;genpy"
dep_include_paths_str = "sensor_actuator;/home/kaiodt/kaio_ros_ws/devel/share/sensor_actuator/msg;std_msgs;/opt/ros/indigo/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/indigo/share/geometry_msgs/cmake/../msg;actionlib_msgs;/opt/ros/indigo/share/actionlib_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
# '' == 'TRUE' evaluates to False: the generator left the flag unset.
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/indigo/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| 112.583333
| 566
| 0.831236
| 207
| 1,351
| 5.149758
| 0.294686
| 0.157599
| 0.131332
| 0.159475
| 0.5
| 0.440901
| 0.435272
| 0.435272
| 0.435272
| 0.362101
| 0
| 0
| 0.025167
| 1,351
| 11
| 567
| 122.818182
| 0.809415
| 0.036269
| 0
| 0
| 1
| 0.333333
| 0.843846
| 0.817692
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6c5eb0539ff88d38653fcd00d2c4209953a73fd6
| 87
|
py
|
Python
|
cmake_setuptools/__init__.py
|
valgur/cmake_setuptools
|
53aed85bbe14bd25caade54e8c1b3f7b1779ad6e
|
[
"Apache-2.0"
] | 8
|
2019-04-12T06:39:59.000Z
|
2020-03-24T06:37:48.000Z
|
cmake_setuptools/__init__.py
|
valgur/cmake_setuptools
|
53aed85bbe14bd25caade54e8c1b3f7b1779ad6e
|
[
"Apache-2.0"
] | 6
|
2019-05-07T20:41:22.000Z
|
2020-04-01T22:07:05.000Z
|
cmake_setuptools/__init__.py
|
valgur/cmake_setuptools
|
53aed85bbe14bd25caade54e8c1b3f7b1779ad6e
|
[
"Apache-2.0"
] | 6
|
2019-05-07T16:16:58.000Z
|
2020-05-06T10:11:44.000Z
|
__version__ = '0.1.1'
from .cmake import *
from .headers import *
from .utils import *
| 17.4
| 22
| 0.701149
| 13
| 87
| 4.384615
| 0.615385
| 0.350877
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0.172414
| 87
| 5
| 23
| 17.4
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.056818
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6c6a29998d1d4c7e0b474d4037a98e1b43b5fbc8
| 73
|
py
|
Python
|
arcade/python/arcade-theCore/17_RegularHell/140_SwapAdjacentWords.py
|
netor27/codefights-arcade-solutions
|
69701ab06d45902c79ec9221137f90b75969d8c8
|
[
"MIT"
] | null | null | null |
arcade/python/arcade-theCore/17_RegularHell/140_SwapAdjacentWords.py
|
netor27/codefights-arcade-solutions
|
69701ab06d45902c79ec9221137f90b75969d8c8
|
[
"MIT"
] | null | null | null |
arcade/python/arcade-theCore/17_RegularHell/140_SwapAdjacentWords.py
|
netor27/codefights-arcade-solutions
|
69701ab06d45902c79ec9221137f90b75969d8c8
|
[
"MIT"
] | null | null | null |
def swapAdjacentWords(s):
    """Return *s* with each pair of adjacent words swapped.

    Words are runs of word characters separated by a single space; an
    unpaired trailing word is left in place.
    """
    # Bug fix: the original file never imported `re`, so calling this
    # function raised NameError.  Import locally to keep the fix self-contained.
    import re
    return re.sub(r'(\w+) (\w+)', r'\2 \1', s)
| 24.333333
| 46
| 0.534247
| 13
| 73
| 3
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032787
| 0.164384
| 73
| 2
| 47
| 36.5
| 0.606557
| 0
| 0
| 0
| 0
| 0
| 0.219178
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
6c73733bdd3f667bbf822b02a7bda004b6fe61b2
| 133
|
py
|
Python
|
predavanje2/konverzija_tipova_podataka.py
|
Miillky/uvod_u_programiranje
|
209611e38c8fe84c727649df4b868a4278eb77c3
|
[
"MIT"
] | null | null | null |
predavanje2/konverzija_tipova_podataka.py
|
Miillky/uvod_u_programiranje
|
209611e38c8fe84c727649df4b868a4278eb77c3
|
[
"MIT"
] | null | null | null |
predavanje2/konverzija_tipova_podataka.py
|
Miillky/uvod_u_programiranje
|
209611e38c8fe84c727649df4b868a4278eb77c3
|
[
"MIT"
] | null | null | null |
# Type-conversion demo: int/float/str/bool casts.
int(10.6)
#10 (float -> int truncates toward zero)
float(100)
#100.0
str(100)
#'100'
int('1001')
#1001
# Bug fix: int('1001s') raises ValueError, which previously killed the
# script here so the bool() demos below never ran.  Catch it so the demo
# runs to completion while still exercising the failing conversion.
try:
    int('1001s')
except ValueError:
    pass  # Error (invalid literal for int)
bool(1)
# True
bool(0)
# False
bool(-1)
# True (any nonzero number is truthy)
| 8.3125
| 12
| 0.609023
| 26
| 133
| 3.115385
| 0.538462
| 0.148148
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.289474
| 0.142857
| 133
| 16
| 13
| 8.3125
| 0.421053
| 0.285714
| 0
| 0
| 0
| 0
| 0.102273
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6c8c1e481638b568891616e62676a98ad104f6db
| 1,037
|
py
|
Python
|
bamboolean/tests/test_normalize.py
|
qedsoftware/bamboolean
|
4d7c720cd793d83a343d1048a15e03fac7cea31c
|
[
"MIT"
] | 5
|
2018-03-16T14:31:52.000Z
|
2020-07-10T13:07:55.000Z
|
bamboolean/tests/test_normalize.py
|
qedsoftware/bamboolean
|
4d7c720cd793d83a343d1048a15e03fac7cea31c
|
[
"MIT"
] | null | null | null |
bamboolean/tests/test_normalize.py
|
qedsoftware/bamboolean
|
4d7c720cd793d83a343d1048a15e03fac7cea31c
|
[
"MIT"
] | 3
|
2018-04-05T06:59:30.000Z
|
2019-12-05T14:28:28.000Z
|
import unittest
from bamboolean.factories import normalize
class NormalizeExpr(unittest.TestCase):
    """Tests that `normalize` canonicalizes bamboolean expressions."""

    def _check(self, source, expected):
        """Assert that normalizing *source* yields *expected*."""
        self.assertEqual(normalize(source), expected)

    def test_normalize_should_not_change_normalized_terms(self):
        self._check('false', 'false')

    def test_normalize_negation(self):
        self._check('not x', 'not x')
        self._check('not not x', 'x')
        self._check('not not not x', 'not x')

    def test_normalize_binop(self):
        self._check('not (x and not y)', '(not x or y)')
        self._check('x or not not y', '(x or y)')

    def test_normalize_empty(self):
        self._check('', '')

    def test_normalize_bool(self):
        self._check('not false', 'true')

    def test_normalize_relop(self):
        self._check('not (x >= 42)', 'x < 42')
        self._check('not (x > 42)', 'x <= 42')
        self._check('not (x > 42 and y)', '(x <= 42 or not y)')
| 34.566667
| 72
| 0.640309
| 134
| 1,037
| 4.835821
| 0.223881
| 0.25463
| 0.407407
| 0.333333
| 0.396605
| 0.348765
| 0.148148
| 0.148148
| 0.148148
| 0.148148
| 0
| 0.014634
| 0.209257
| 1,037
| 29
| 73
| 35.758621
| 0.77561
| 0
| 0
| 0
| 0
| 0
| 0.179364
| 0
| 0
| 0
| 0
| 0
| 0.52381
| 1
| 0.285714
| false
| 0
| 0.095238
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
665a0b50b07eb9b911f15b8d899b27d0289aadc1
| 25
|
py
|
Python
|
src/masonite/providers/Provider.py
|
cercos/masonite
|
f7f220efa7fae833683e9f07ce13c3795a87d3b8
|
[
"MIT"
] | 1,816
|
2018-02-14T01:59:51.000Z
|
2022-03-31T17:09:20.000Z
|
src/masonite/providers/Provider.py
|
cercos/masonite
|
f7f220efa7fae833683e9f07ce13c3795a87d3b8
|
[
"MIT"
] | 340
|
2018-02-11T00:27:26.000Z
|
2022-03-21T12:00:24.000Z
|
src/masonite/providers/Provider.py
|
cercos/masonite
|
f7f220efa7fae833683e9f07ce13c3795a87d3b8
|
[
"MIT"
] | 144
|
2018-03-18T00:08:16.000Z
|
2022-02-26T01:51:58.000Z
|
class Provider:
    """Empty base/marker class for service providers.

    NOTE(review): subclasses presumably supply registration behavior --
    confirm the intended contract against the framework's docs.
    """
    pass
| 8.333333
| 15
| 0.68
| 3
| 25
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.28
| 25
| 2
| 16
| 12.5
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
665a80457c1acbb43d08c616ba1757a2da4ee871
| 82
|
py
|
Python
|
argparseqt/parser/__init__.py
|
Adanteh/argparseqt
|
980b613b60576a51a71af0e09e7c9f1b7f651f6c
|
[
"MIT"
] | null | null | null |
argparseqt/parser/__init__.py
|
Adanteh/argparseqt
|
980b613b60576a51a71af0e09e7c9f1b7f651f6c
|
[
"MIT"
] | null | null | null |
argparseqt/parser/__init__.py
|
Adanteh/argparseqt
|
980b613b60576a51a71af0e09e7c9f1b7f651f6c
|
[
"MIT"
] | null | null | null |
from . import groupingTools # noqa: F401
from . import typeHelpers # noqa: F401
| 27.333333
| 41
| 0.731707
| 10
| 82
| 6
| 0.6
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 0.195122
| 82
| 2
| 42
| 41
| 0.818182
| 0.256098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
667b8366d33e6b6a6c9bd6bc3a811ef170e01e64
| 350
|
py
|
Python
|
esmtools/tests/test_checks.py
|
luke-gregor/esmtools
|
27e58176ce8f00d1fd94279bdcfba242fd9c8de3
|
[
"MIT"
] | 20
|
2019-10-02T12:02:49.000Z
|
2022-01-28T23:08:23.000Z
|
esmtools/tests/test_checks.py
|
luke-gregor/esmtools
|
27e58176ce8f00d1fd94279bdcfba242fd9c8de3
|
[
"MIT"
] | 84
|
2018-09-20T21:28:59.000Z
|
2021-08-17T16:21:22.000Z
|
esmtools/tests/test_checks.py
|
luke-gregor/esmtools
|
27e58176ce8f00d1fd94279bdcfba242fd9c8de3
|
[
"MIT"
] | 6
|
2018-09-21T05:04:22.000Z
|
2020-11-11T18:51:07.000Z
|
from esmtools.checks import has_missing
def test_has_missing(gridded_da_float, gridded_da_landmask, gridded_da_missing_data):
    """Check `has_missing` over grids with and without NaN values."""
    # A freshly generated float grid contains no NaNs, so nothing is missing.
    assert not has_missing(gridded_da_float())
    # Both the land-masked grid and the grid with data gaps contain NaNs.
    for grid_with_nans in (gridded_da_landmask, gridded_da_missing_data):
        assert has_missing(grid_with_nans)
| 38.888889
| 85
| 0.814286
| 50
| 350
| 5.28
| 0.46
| 0.227273
| 0.257576
| 0.287879
| 0.371212
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 350
| 8
| 86
| 43.75
| 0.857143
| 0.205714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
66b71dab26c3bb7cf620e776131985b44a4409c2
| 249
|
py
|
Python
|
cords/utils/__init__.py
|
SatyadevNtv/cords
|
f309772dec452b19e36c12104b84181c64d2d809
|
[
"MIT"
] | null | null | null |
cords/utils/__init__.py
|
SatyadevNtv/cords
|
f309772dec452b19e36c12104b84181c64d2d809
|
[
"MIT"
] | null | null | null |
cords/utils/__init__.py
|
SatyadevNtv/cords
|
f309772dec452b19e36c12104b84181c64d2d809
|
[
"MIT"
] | null | null | null |
# __init__.py
# Author: Krishnateja Killamsetty <krishnatejakillamsetty@gmail.com>
from .custom_dataset import CustomDataset
from .custom_dataset import load_dataset_custom
from .utils import generate_cumulative_timing
from .utils import logtoxl
| 24.9
| 68
| 0.84739
| 30
| 249
| 6.7
| 0.633333
| 0.099502
| 0.169154
| 0.228856
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104418
| 249
| 9
| 69
| 27.666667
| 0.901345
| 0.313253
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
66c0413c485965e01b83742d4416ba8488865792
| 303
|
py
|
Python
|
Examples/pytest/conftest.py
|
tony-hinterland/unium
|
2ff9e6b70b49697a26e5b5a69483e1c6d2e3af80
|
[
"MIT"
] | 187
|
2017-08-16T01:16:28.000Z
|
2022-03-31T09:40:18.000Z
|
Examples/pytest/conftest.py
|
tony-hinterland/unium
|
2ff9e6b70b49697a26e5b5a69483e1c6d2e3af80
|
[
"MIT"
] | 80
|
2017-08-25T17:55:48.000Z
|
2022-02-02T13:22:36.000Z
|
Examples/pytest/conftest.py
|
tony-hinterland/unium
|
2ff9e6b70b49697a26e5b5a69483e1c6d2e3af80
|
[
"MIT"
] | 44
|
2017-10-07T13:48:25.000Z
|
2022-03-31T09:40:29.000Z
|
# Fixtures provide test setup and configuration
# https://docs.pytest.org/en/latest/fixture.html#fixtures
import pytest
@pytest.fixture(scope="session")
def unium_endpoint():
    """Session-scoped fixture: HTTP endpoint of the unium server under test."""
    endpoint = "http://localhost:8342"
    return endpoint
@pytest.fixture(scope="session")
def unium_socket():
    """Session-scoped fixture: websocket URL of the unium server under test."""
    socket_url = "ws://localhost:8342/ws"
    return socket_url
| 21.642857
| 57
| 0.742574
| 40
| 303
| 5.575
| 0.65
| 0.116592
| 0.161435
| 0.224215
| 0.295964
| 0.295964
| 0
| 0
| 0
| 0
| 0
| 0.02963
| 0.108911
| 303
| 13
| 58
| 23.307692
| 0.796296
| 0.330033
| 0
| 0.285714
| 0
| 0
| 0.287879
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| true
| 0
| 0.142857
| 0.285714
| 0.714286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
66ee7bf02eb987e1df663d2f47a0d88bcf7d3f53
| 16,572
|
py
|
Python
|
tests/apollo/test_skvbc_dbsnapshot.py
|
nkumar04/concord-bft
|
6fac43e8e6cca540fc06459b2293af333696d45b
|
[
"Apache-2.0"
] | 1
|
2021-05-18T19:01:47.000Z
|
2021-05-18T19:01:47.000Z
|
tests/apollo/test_skvbc_dbsnapshot.py
|
nkumar04/concord-bft
|
6fac43e8e6cca540fc06459b2293af333696d45b
|
[
"Apache-2.0"
] | null | null | null |
tests/apollo/test_skvbc_dbsnapshot.py
|
nkumar04/concord-bft
|
6fac43e8e6cca540fc06459b2293af333696d45b
|
[
"Apache-2.0"
] | null | null | null |
# Concord
#
# Copyright (c) 2021 VMware, Inc. All Rights Reserved.
#
# This product is licensed to you under the Apache 2.0 license (the "License").
# You may not use this product except in compliance with the Apache 2.0 License.
#
# This product may include a number of subcomponents with separate copyright
# notices and license terms. Your use of these subcomponents is subject to the
# terms and conditions of the subcomponent's license, as noted in the LICENSE
# file.
import unittest
import trio
import os.path
import random
import time
import difflib
import subprocess
import shutil
from util import bft
from util import skvbc as kvbc
from util.skvbc import SimpleKVBCProtocol
from util.skvbc_history_tracker import verify_linearizability
from util.bft import with_trio, with_bft_network, KEY_FILE_PREFIX, DB_FILE_PREFIX, DB_SNAPSHOT_PREFIX
from util import bft_metrics, eliot_logging as log
from util.object_store import ObjectStore, start_replica_cmd_prefix, with_object_store
from util import operator
import concord_msgs as cmf_msgs
import sys
sys.path.append(os.path.abspath("../../util/pyclient"))
import bft_client
def start_replica_cmd(builddir, replica_id):
    """Build the argv list (for subprocess.Popen) that launches an skvbc replica.

    Each option and its value is a separate list element.
    """
    # Batching and the time service are toggled together via the environment.
    time_service_on = os.environ.get('TIME_SERVICE_ENABLED', default="FALSE").lower() == "true"
    batch_size = "2" if time_service_on else "1"
    time_service_enabled = "1" if time_service_on else "0"
    replica_binary = os.path.join(builddir, "tests", "simpleKVBC", "TesterReplica", "skvbc_replica")
    log_properties = os.path.join(builddir, "tests", "simpleKVBC", "scripts", "logging.properties")
    return [
        replica_binary,
        "-k", KEY_FILE_PREFIX,
        "-i", str(replica_id),
        "-s", "500",            # status timer (ms)
        "-v", "10000",          # view-change timeout (ms)
        "-l", log_properties,
        "-f", time_service_enabled,
        "-b", "2",
        "-q", batch_size,
        "-h", "3",
        "-j", "150",
        "-o", builddir + "/operator_pub.pem",
    ]
def start_replica_cmd_with_operator(builddir, replica_id):
    """Build the argv list that launches an skvbc replica with operator support.

    Same as `start_replica_cmd` but with a larger "-j" value (600).
    Each option and its value is a separate list element.
    """
    # Batching and the time service are toggled together via the environment.
    time_service_on = os.environ.get('TIME_SERVICE_ENABLED', default="FALSE").lower() == "true"
    batch_size = "2" if time_service_on else "1"
    time_service_enabled = "1" if time_service_on else "0"
    replica_binary = os.path.join(builddir, "tests", "simpleKVBC", "TesterReplica", "skvbc_replica")
    log_properties = os.path.join(builddir, "tests", "simpleKVBC", "scripts", "logging.properties")
    return [
        replica_binary,
        "-k", KEY_FILE_PREFIX,
        "-i", str(replica_id),
        "-s", "500",            # status timer (ms)
        "-v", "10000",          # view-change timeout (ms)
        "-l", log_properties,
        "-f", time_service_enabled,
        "-b", "2",
        "-q", batch_size,
        "-h", "3",
        "-j", "600",
        "-o", builddir + "/operator_pub.pem",
    ]
def start_replica_cmd_db_snapshot_disabled(builddir, replica_id):
    """Build the argv list that launches an skvbc replica with db snapshots disabled.

    Same as `start_replica_cmd` but passes "-h 0" and omits "-j"/"-o".
    Each option and its value is a separate list element.
    """
    # Batching and the time service are toggled together via the environment.
    time_service_on = os.environ.get('TIME_SERVICE_ENABLED', default="FALSE").lower() == "true"
    batch_size = "2" if time_service_on else "1"
    time_service_enabled = "1" if time_service_on else "0"
    replica_binary = os.path.join(builddir, "tests", "simpleKVBC", "TesterReplica", "skvbc_replica")
    log_properties = os.path.join(builddir, "tests", "simpleKVBC", "scripts", "logging.properties")
    return [
        replica_binary,
        "-k", KEY_FILE_PREFIX,
        "-i", str(replica_id),
        "-s", "500",            # status timer (ms)
        "-v", "10000",          # view-change timeout (ms)
        "-l", log_properties,
        "-f", time_service_enabled,
        "-b", "2",
        "-q", batch_size,
        "-h", "0",
    ]
class SkvbcDbSnapshotTest(unittest.TestCase):
    """Tests for RocksDB checkpoint ("db snapshot") creation, cleanup, operator
    commands, and restoring a replica from a snapshot."""
    __test__ = False  # so that PyTest ignores this test scenario

    @with_trio
    @with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
    @verify_linearizability()
    async def test_db_checkpoint_creation(self, bft_network, tracker):
        """After 150 writes reach a stable checkpoint, exactly one db snapshot exists."""
        bft_network.start_all_replicas()
        client = bft_network.random_client()
        skvbc = kvbc.SimpleKVBCProtocol(bft_network, tracker)
        for i in range(150):
            await skvbc.send_write_kv_set()
        await self.wait_for_stable_checkpoint(bft_network, bft_network.all_replicas(), 150)
        num_of_db_snapshots = await bft_network.get_metric(0, bft_network, "Counters", "numOfDbCheckpointsCreated", component="rocksdbCheckpoint")
        assert num_of_db_snapshots == 1
        # Snapshot directory must exist on disk for the reported block id.
        last_blockId = await bft_network.get_metric(0, bft_network, "Gauges", "lastDbCheckpointBlockId", component="rocksdbCheckpoint")
        self.verify_snapshot_is_available(bft_network, 0, last_blockId)

    @with_trio
    @with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
    @verify_linearizability()
    async def test_restore_from_snapshot(self, bft_network, tracker):
        """Crash a replica, restore its db from an older snapshot, and verify it
        catches up via state transfer and the fast path keeps advancing."""
        initial_prim = 0
        bft_network.start_all_replicas()
        skvbc = kvbc.SimpleKVBCProtocol(bft_network, tracker)
        for i in range(150):
            await skvbc.send_write_kv_set()
        await self.wait_for_stable_checkpoint(bft_network, bft_network.all_replicas(), 150)
        num_of_db_snapshots = await bft_network.get_metric(0, bft_network, "Counters", "numOfDbCheckpointsCreated", component="rocksdbCheckpoint")
        assert num_of_db_snapshots == 1
        snapshot_id = await bft_network.get_metric(0, bft_network, "Gauges", "lastDbCheckpointBlockId", component="rocksdbCheckpoint")
        self.verify_snapshot_is_available(bft_network, 0, snapshot_id)
        # Record per-replica fast-path counters to compare against later.
        fast_paths = {}
        for r in bft_network.all_replicas():
            nb_fast_path = await bft_network.get_metric(r, bft_network, "Counters", "totalFastPaths")
            fast_paths[r] = nb_fast_path
        crashed_replica = list(bft_network.random_set_of_replicas(1, {initial_prim}))
        live_replicas = bft_network.all_replicas(without=set(crashed_replica))
        bft_network.stop_replicas(crashed_replica)
        for i in range(450):
            await skvbc.send_write_kv_set()
        # Overwrite the crashed replica's db with the older snapshot before restarting it.
        for r in crashed_replica:
            self.restore_form_older_snapshot(bft_network, r, snapshot_id)
        bft_network.start_replicas(crashed_replica)
        await bft_network.wait_for_state_transfer_to_start()
        for r in crashed_replica:
            await bft_network.wait_for_state_transfer_to_stop(initial_prim,
                                                              r,
                                                              stop_on_stable_seq_num=False)
        for i in range(600):
            await skvbc.send_write_kv_set()
        # Fast path must have progressed on every replica, including the restored one.
        for r in bft_network.all_replicas():
            nb_fast_path = await bft_network.get_metric(r, bft_network, "Counters", "totalFastPaths")
            self.assertGreater(nb_fast_path, fast_paths[r])

    @with_trio
    @with_bft_network(start_replica_cmd_db_snapshot_disabled, selected_configs=lambda n, f, c: n == 7)
    @verify_linearizability()
    async def test_db_checkpoint_disabled(self, bft_network, tracker):
        """With db checkpoints disabled ("-h 0"), no snapshot is ever created."""
        bft_network.start_all_replicas()
        client = bft_network.random_client()
        skvbc = kvbc.SimpleKVBCProtocol(bft_network, tracker)
        for i in range(300):
            await skvbc.send_write_kv_set()
        await self.wait_for_stable_checkpoint(bft_network, bft_network.all_replicas(), 300)
        num_of_db_snapshots = await bft_network.get_metric(0, bft_network, "Counters", "numOfDbCheckpointsCreated", component="rocksdbCheckpoint")
        assert num_of_db_snapshots == 0

    @with_trio
    @with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
    @verify_linearizability()
    async def test_db_checkpoint_cleanup(self, bft_network, tracker):
        '''
        Verify that the oldest db checkpoint is removed once we reach the
        maximum number of allowed db checkpoints.
        '''
        initial_prim = 0
        bft_network.start_all_replicas()
        client = bft_network.random_client()
        skvbc = kvbc.SimpleKVBCProtocol(bft_network, tracker)
        checkpoint_before = await bft_network.wait_for_checkpoint(replica_id=initial_prim)
        await skvbc.fill_and_wait_for_checkpoint(
            initial_nodes=bft_network.all_replicas(),
            num_of_checkpoints_to_add=1,
            verify_checkpoint_persistency=False,
            assert_state_transfer_not_started=False
        )
        checkpoint_after_1 = await bft_network.wait_for_checkpoint(replica_id=initial_prim)
        self.assertGreaterEqual(checkpoint_before + 1, checkpoint_after_1)
        await self.wait_for_stable_checkpoint(bft_network, bft_network.all_replicas(), checkpoint_after_1*150)
        num_of_db_snapshots = await bft_network.get_metric(0, bft_network, "Counters", "numOfDbCheckpointsCreated", component="rocksdbCheckpoint")
        assert num_of_db_snapshots == 1
        # Remember the first snapshot; it should be evicted after more checkpoints are added.
        old_snapshot_id = await bft_network.get_metric(0, bft_network, "Gauges", "lastDbCheckpointBlockId", component="rocksdbCheckpoint")
        self.verify_snapshot_is_available(bft_network, 0, old_snapshot_id)
        await skvbc.fill_and_wait_for_checkpoint(
            initial_nodes=bft_network.all_replicas(),
            num_of_checkpoints_to_add=3,
            verify_checkpoint_persistency=False,
            assert_state_transfer_not_started=False
        )
        checkpoint_after_2 = await bft_network.wait_for_checkpoint(replica_id=initial_prim)
        self.assertGreaterEqual(checkpoint_after_1 + 3, checkpoint_after_2)
        await self.wait_for_stable_checkpoint(bft_network, bft_network.all_replicas(), checkpoint_after_2*150)
        num_of_db_snapshots = await bft_network.get_metric(0, bft_network, "Counters", "numOfDbCheckpointsCreated", component="rocksdbCheckpoint")
        assert num_of_db_snapshots == 4
        # The oldest snapshot must have been cleaned up.
        self.verify_snapshot_is_available(bft_network, 0, old_snapshot_id, isPresent=False)

    @with_trio
    @with_bft_network(start_replica_cmd_with_operator, selected_configs=lambda n, f, c: n == 7)
    @verify_linearizability()
    async def test_create_dbcheckpoint_cmd(self, bft_network, tracker):
        """
        Sends a createDbCheckpoint operator command and verifies the created db checkpoints.
        """
        bft_network.start_all_replicas()
        client = bft_network.random_client()
        skvbc = kvbc.SimpleKVBCProtocol(bft_network, tracker)
        for i in range(200):
            await skvbc.send_write_kv_set()
        op = operator.Operator(bft_network.config, client, bft_network.builddir)
        rep = await op.create_dbcheckpoint_cmd()
        data = cmf_msgs.ReconfigurationResponse.deserialize(rep)[0]
        assert(data.success == True)
        await self.wait_for_stable_checkpoint(bft_network, bft_network.all_replicas(), 300)
        getrep = await op.get_dbcheckpoint_info_request()
        rsi_rep = client.get_rsi_replies()
        data = cmf_msgs.ReconfigurationResponse.deserialize(getrep)[0]
        assert(data.success == True)
        # Every replica-specific reply must report exactly one checkpoint at seq num 300.
        for r in rsi_rep.values():
            res = cmf_msgs.ReconfigurationResponse.deserialize(r)
            assert(len(res[0].response.db_checkpoint_info) == 1)
            dbcheckpoint_info_list = res[0].response.db_checkpoint_info
            assert(any(dbcheckpoint_info.seq_num == 300 for dbcheckpoint_info in dbcheckpoint_info_list))
        last_blockId = await bft_network.get_metric(0, bft_network, "Gauges", "lastDbCheckpointBlockId", component="rocksdbCheckpoint")
        self.verify_snapshot_is_available(bft_network, 0, last_blockId)

    @with_trio
    @with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
    @verify_linearizability()
    async def test_get_dbcheckpoint_info_request_cmd(self, bft_network, tracker):
        """
        Sends a getDbCheckpointInfoRequest command and verifies the created db checkpoints.
        """
        bft_network.start_all_replicas()
        client = bft_network.random_client()
        skvbc = kvbc.SimpleKVBCProtocol(bft_network, tracker)
        for i in range(300):
            await skvbc.send_write_kv_set()
        # There will be 2 dbcheckpoints created on stable seq num 150 and 300.
        await self.wait_for_stable_checkpoint(bft_network, bft_network.all_replicas(), 300)
        num_of_db_snapshots = await bft_network.get_metric(0, bft_network, "Counters", "numOfDbCheckpointsCreated", component="rocksdbCheckpoint")
        assert num_of_db_snapshots == 2
        op = operator.Operator(bft_network.config, client, bft_network.builddir)
        rep = await op.get_dbcheckpoint_info_request()
        rsi_rep = client.get_rsi_replies()
        data = cmf_msgs.ReconfigurationResponse.deserialize(rep)[0]
        assert(data.success == True)
        # Each replica must report both checkpoints (seq nums 150 and 300).
        for r in rsi_rep.values():
            res = cmf_msgs.ReconfigurationResponse.deserialize(r)
            assert(len(res[0].response.db_checkpoint_info) == 2)
            dbcheckpoint_info_list = res[0].response.db_checkpoint_info
            assert(any(dbcheckpoint_info.seq_num == 150 for dbcheckpoint_info in dbcheckpoint_info_list))
            assert(any(dbcheckpoint_info.seq_num == 300 for dbcheckpoint_info in dbcheckpoint_info_list))
        last_blockId = await bft_network.get_metric(0, bft_network, "Gauges", "lastDbCheckpointBlockId", component="rocksdbCheckpoint")
        self.verify_snapshot_is_available(bft_network, 0, last_blockId)

    def verify_snapshot_is_available(self, bft_network, replicaId, shapshotId, isPresent=True):
        """Assert that the snapshot directory for (replicaId, shapshotId) exists and is
        non-empty when isPresent is True, or does not exist when isPresent is False.

        NOTE(review): "shapshotId" looks like a typo for "snapshotId"; left as-is
        because it is part of the method's signature.
        """
        with log.start_action(action_type="verify snapshot db files"):
            snapshot_db_dir = os.path.join(bft_network.testdir, DB_SNAPSHOT_PREFIX + str(replicaId) + "/" + str(shapshotId))
            if isPresent == True:
                assert os.path.exists(snapshot_db_dir)
                size=0
                for ele in os.scandir(snapshot_db_dir):
                    size+=os.path.getsize(ele)
                assert (size > 0) #make sure that checkpoint folder is not empty
            else:
                assert (os.path.exists(snapshot_db_dir) == False)

    def restore_form_older_snapshot(self, bft_network, replica, snapshot_id):
        """Replace a replica's db directory with the contents of one of its snapshots.

        NOTE(review): "form" in the method name looks like a typo for "from";
        left as-is because it is part of the method's public name.
        """
        with log.start_action(action_type="restore with older snapshot"):
            snapshot_db_dir = os.path.join(bft_network.testdir, DB_SNAPSHOT_PREFIX + str(replica) + "/" + str(snapshot_id))
            dest_db_dir = os.path.join(bft_network.testdir, DB_FILE_PREFIX + str(replica))
            if os.path.exists(dest_db_dir) :
                shutil.rmtree(dest_db_dir)
            ret = shutil.copytree(snapshot_db_dir, dest_db_dir)
            log.log_message(message_type=f"copy db files from {snapshot_db_dir} to {dest_db_dir}, result is {ret}")

    def transfer_dbcheckpoint_files(self, bft_network, source_replica, snapshot_id, dest_replicas):
        """Copy one replica's snapshot db files over the db directories of dest_replicas."""
        with log.start_action(action_type="transfer snapshot db files"):
            snapshot_db_dir = os.path.join(bft_network.testdir, DB_SNAPSHOT_PREFIX + str(source_replica) + "/" + str(snapshot_id))
            for r in dest_replicas:
                dest_db_dir = os.path.join(bft_network.testdir, DB_FILE_PREFIX + str(r))
                if os.path.exists(dest_db_dir) :
                    shutil.rmtree(dest_db_dir)
                ret = shutil.copytree(snapshot_db_dir, dest_db_dir)
                log.log_message(message_type=f"copy db files from {snapshot_db_dir} to {dest_db_dir}, result is {ret}")

    async def wait_for_stable_checkpoint(self, bft_network, replicas, stable_seqnum):
        """Poll (every 0.5s, 30s timeout) until every replica in `replicas`
        reports lastStableSeqNum == stable_seqnum."""
        with trio.fail_after(seconds=30):
            all_in_checkpoint = False
            while all_in_checkpoint is False:
                all_in_checkpoint = True
                for r in replicas:
                    lastStable = await bft_network.get_metric(r, bft_network, "Gauges", "lastStableSeqNum")
                    if lastStable != stable_seqnum:
                        all_in_checkpoint = False
                        break
                await trio.sleep(0.5)
| 49.765766
| 147
| 0.678071
| 2,064
| 16,572
| 5.130814
| 0.13905
| 0.09915
| 0.026912
| 0.023796
| 0.762134
| 0.752502
| 0.735977
| 0.720585
| 0.705571
| 0.692729
| 0
| 0.013474
| 0.229725
| 16,572
| 333
| 148
| 49.765766
| 0.816138
| 0.060162
| 0
| 0.612546
| 0
| 0
| 0.086448
| 0.017501
| 0
| 0
| 0
| 0
| 0.081181
| 1
| 0.02214
| false
| 0
| 0.070111
| 0
| 0.110701
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
dd0089ba998b404f63fe20c2043bb7eaa32c7b12
| 369
|
py
|
Python
|
kaggle_graph/__init__.py
|
OliverSieweke/kaggle-graph
|
daf3cb151ee6c4f6087eb9bb539b2ed6961d4f73
|
[
"MIT"
] | 2
|
2020-05-03T23:00:29.000Z
|
2020-05-05T21:49:26.000Z
|
kaggle_graph/__init__.py
|
OliverSieweke/kaggle-graph
|
daf3cb151ee6c4f6087eb9bb539b2ed6961d4f73
|
[
"MIT"
] | 6
|
2020-05-05T06:21:22.000Z
|
2020-05-05T18:08:01.000Z
|
kaggle_graph/__init__.py
|
OliverSieweke/kaggle-graph
|
daf3cb151ee6c4f6087eb9bb539b2ed6961d4f73
|
[
"MIT"
] | null | null | null |
"""
Kaggle Graph
============
This is the top level module, which includes a :code:`__main__` script
to generate the Kaggle graph by loading the required environment
variables (including the Kaggle credentials) using
:py:mod:`kaggle_graph.action_inputs` and then composing the methods
exposed in :py:mod:`kaggle_graph.plot` and
:py:mod:`kaggle_graph.submissions`.
"""
| 30.75
| 70
| 0.761518
| 54
| 369
| 5.055556
| 0.666667
| 0.201465
| 0.120879
| 0.175824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119241
| 369
| 11
| 71
| 33.545455
| 0.84
| 0.97561
| 0
| null | 1
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
dd095804f8bce97f947102719ab00fdd896fe449
| 18,269
|
py
|
Python
|
api/users/tests.py
|
liobrdev/simplekanban
|
ececbe15cd34aa53e7d37564879a8c14827e0ebb
|
[
"MIT"
] | null | null | null |
api/users/tests.py
|
liobrdev/simplekanban
|
ececbe15cd34aa53e7d37564879a8c14827e0ebb
|
[
"MIT"
] | null | null | null |
api/users/tests.py
|
liobrdev/simplekanban
|
ececbe15cd34aa53e7d37564879a8c14827e0ebb
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core import mail
from django_redis import get_redis_connection
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from custom_db_logger.models import StatusLog
from custom_db_logger.utils import LogLevels
from users.utils import UserCommands
from utils.testing import test_user_1, test_user_2, create_user, log_msg_regex
class UserAccountTest(APITestCase):
databases = '__all__'
    def setUp(self):
        """Create the two users that these tests log in as and operate on."""
        self.user_1 = create_user()
        self.user_2 = create_user(test_user_2)
    def tearDown(self):
        """Flush the default Redis cache so state does not leak between tests."""
        get_redis_connection('default').flushall()
    def test_user_update_fail_missing_password(self):
        """PATCH without 'current_password' returns 400, and the error is logged and emailed."""
        login = self.client.post(reverse('login'), data={
            'email': test_user_1['email'],
            'password': test_user_1['password'],
        })
        user_slug = login.data['user']['user_slug']
        self.client.credentials(HTTP_AUTHORIZATION=f"Token {login.data['token']}")
        url = f'/api/users/{user_slug}/'
        patch = self.client.patch(url, data={ 'name': 'New' }, format='json')
        self.assertEqual(patch.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(patch.data['detail'], 'Error updating account.')
        # The failure is recorded on the 'logger' database and emailed to the contact address.
        log = StatusLog.objects.using('logger').latest('created_at')
        self.assertRegex(log.msg, log_msg_regex('Error updating user.', LogLevels.ERROR))
        self.assertEqual(log.command, UserCommands.UPDATE)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject,
            f'{settings.EMAIL_SUBJECT_PREFIX}ERROR: Error updating user.')
        self.assertListEqual(mail.outbox[0].to, ['contact@simplekanban.app'])
        self.assertEqual(StatusLog.objects.using('logger').count(), 1)
    def test_user_update_fail_wrong_password(self):
        """PATCH with an incorrect 'current_password' returns 400 and logs nothing."""
        login = self.client.post(reverse('login'), data={
            'email': test_user_1['email'],
            'password': test_user_1['password'],
        })
        user_slug = login.data['user']['user_slug']
        self.client.credentials(HTTP_AUTHORIZATION=f"Token {login.data['token']}")
        url = f'/api/users/{user_slug}/'
        patch = self.client.patch(
            url,
            data={ 'name': 'New', 'current_password': 'wrongPw#1' },
            format='json',
        )
        self.assertEqual(patch.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertListEqual(patch.data['current_password'], ['Invalid password.'])
        self.assertEqual(StatusLog.objects.using('logger').count(), 0)
    def test_user_update_email_fail_already_in_use(self):
        """PATCH changing email to another user's address returns 400 with a field error."""
        login = self.client.post(reverse('login'), data={
            'email': test_user_1['email'],
            'password': test_user_1['password'],
        })
        user_slug = login.data['user']['user_slug']
        self.client.credentials(HTTP_AUTHORIZATION=f"Token {login.data['token']}")
        url = f'/api/users/{user_slug}/'
        patch = self.client.patch(
            url,
            data={
                # user_2's email is already taken, so the update must be rejected.
                'email': self.user_2.email,
                'current_password': test_user_1['password'],
            },
            format='json',
        )
        self.assertEqual(patch.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertListEqual(patch.data['email'], [
            'Email address unavailable - please choose a different one.',
        ])
        self.assertEqual(StatusLog.objects.using('logger').count(), 0)
    def test_user_update_password_fail_invalid(self):
        """PATCH with invalid new passwords returns 400 for each validator failure.

        Covers: all-numeric, similar-to-email, too-common, too-short, mismatched
        confirmation, and missing password field; the last three also write a
        StatusLog entry and send an admin email.
        """
        login = self.client.post(reverse('login'), data={
            'email': test_user_1['email'],
            'password': test_user_1['password'],
        })
        user_slug = login.data['user']['user_slug']
        self.client.credentials(HTTP_AUTHORIZATION=f"Token {login.data['token']}")
        url = f'/api/users/{user_slug}/'
        # All numeric
        patch_1 = self.client.patch(
            url,
            data={
                'password': '89898022',
                'password_2': '89898022',
                'current_password': test_user_1['password'],
            },
            format='json',
        )
        self.assertEqual(patch_1.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertListEqual(patch_1.data['password'], [
            'This password is entirely numeric.',
        ])
        # Similar to email
        patch_2 = self.client.patch(
            url,
            data={
                'password': test_user_1['email'],
                'password_2': test_user_1['email'],
                'current_password': test_user_1['password'],
            },
            format='json',
        )
        self.assertEqual(patch_2.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertListEqual(patch_2.data['password'], [
            'The password is too similar to the email address.',
        ])
        # Too common
        patch_3 = self.client.patch(
            url,
            data={
                'password': 'asdfqwer',
                'password_2': 'asdfqwer',
                'current_password': test_user_1['password'],
            },
            format='json',
        )
        self.assertEqual(patch_3.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertListEqual(patch_3.data['password'], ['This password is too common.'])
        # Too short, add log
        patch_4 = self.client.patch(
            url,
            data={
                'password': 'pAssW0#',
                'password_2': 'pAssW0#',
                'current_password': test_user_1['password'],
            },
            format='json',
        )
        self.assertEqual(patch_4.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertListEqual(patch_4.data['password'], [
            'This password is too short. It must contain at least 8 characters.',
        ])
        log_1 = StatusLog.objects.using('logger').latest('created_at')
        self.assertRegex(log_1.msg, log_msg_regex('Short password.', LogLevels.ERROR))
        self.assertEqual(log_1.command, UserCommands.UPDATE)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject,
            f'{settings.EMAIL_SUBJECT_PREFIX}ERROR: Short password.')
        self.assertListEqual(mail.outbox[0].to, ['contact@simplekanban.app'])
        self.assertEqual(StatusLog.objects.using('logger').count(), 1)
        # Not matching, add log
        patch_5 = self.client.patch(
            url,
            data={
                'password': 'newPassw0rd$',
                'password_2': 'newPassw0rd',
                'current_password': test_user_1['password'],
            },
            format='json',
        )
        self.assertEqual(patch_5.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertListEqual(patch_5.data['password_2'], ['Passwords do not match.'])
        log_2 = StatusLog.objects.using('logger').latest('created_at')
        self.assertRegex(log_2.msg, log_msg_regex('Error changing user password.', LogLevels.ERROR))
        self.assertEqual(log_2.command, UserCommands.UPDATE)
        self.assertEqual(len(mail.outbox), 2)
        self.assertEqual(mail.outbox[1].subject,
            f'{settings.EMAIL_SUBJECT_PREFIX}ERROR: Error changing user password.')
        self.assertListEqual(mail.outbox[1].to, ['contact@simplekanban.app'])
        self.assertEqual(StatusLog.objects.using('logger').count(), 2)
        # Missing password, add log
        patch_6 = self.client.patch(
            url,
            data={
                'password_2': 'newPassw0rd$',
                'current_password': test_user_1['password'],
            },
            format='json',
        )
        self.assertEqual(patch_6.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertListEqual(patch_6.data['password'], ['Invalid password change.'])
        log_3 = StatusLog.objects.using('logger').latest('created_at')
        self.assertRegex(log_3.msg, log_msg_regex('Error changing user password.', LogLevels.ERROR))
        self.assertEqual(log_3.command, UserCommands.UPDATE)
        self.assertEqual(len(mail.outbox), 3)
        self.assertEqual(mail.outbox[2].subject,
            f'{settings.EMAIL_SUBJECT_PREFIX}ERROR: Error changing user password.')
        self.assertListEqual(mail.outbox[2].to, ['contact@simplekanban.app'])
        self.assertEqual(StatusLog.objects.using('logger').count(), 3)
    def test_user_update_password_fail_same_as_slug(self):
        """PATCH setting the password equal to the user's own slug returns 400."""
        login = self.client.post(reverse('login'), data={
            'email': test_user_1['email'],
            'password': test_user_1['password'],
        })
        user_slug = login.data['user']['user_slug']
        self.client.credentials(HTTP_AUTHORIZATION=f"Token {login.data['token']}")
        url = f'/api/users/{user_slug}/'
        patch = self.client.patch(
            url,
            data={
                'password': user_slug,
                'password_2': user_slug,
                'current_password': test_user_1['password'],
            },
            format='json',
        )
        self.assertEqual(patch.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertListEqual(patch.data['password'], ['Password cannot be your user ID.'])
        self.assertEqual(StatusLog.objects.using('logger').count(), 0)
    def test_successful_user_password_update(self):
        """A valid password change succeeds; the new password logs in, the old one is rejected."""
        login = self.client.post(reverse('login'), data={
            'email': test_user_1['email'],
            'password': test_user_1['password'],
        })
        user_slug = login.data['user']['user_slug']
        self.client.credentials(HTTP_AUTHORIZATION=f"Token {login.data['token']}")
        url = f'/api/users/{user_slug}/'
        patch = self.client.patch(
            url,
            data={
                'password': 'newPassw0rd$',
                'password_2': 'newPassw0rd$',
                'current_password': test_user_1['password'],
            },
            format='json',
        )
        self.assertEqual(patch.status_code, status.HTTP_200_OK)
        # Test new password
        login_1 = self.client.post(reverse('login'), data={
            'email': test_user_1['email'],
            'password': 'newPassw0rd$',
        })
        self.assertEqual(login_1.status_code, status.HTTP_200_OK)
        # Test old password fail
        login_2 = self.client.post(reverse('login'), data={
            'email': test_user_1['email'],
            'password': test_user_1['password'],
        })
        self.assertEqual(login_2.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(StatusLog.objects.using('logger').count(), 0)
    def test_cannot_access_another_users_info(self):
        """PATCHing another user's resource returns 403, and the denial is logged and emailed."""
        login = self.client.post(reverse('login'), data={
            'email': test_user_1['email'],
            'password': test_user_1['password'],
        })
        self.client.credentials(HTTP_AUTHORIZATION=f"Token {login.data['token']}")
        # user_1 is authenticated but targets user_2's slug.
        url = f'/api/users/{self.user_2.user_slug}/'
        res_fail = self.client.patch(
            url,
            data={
                'email': 'newemail.willfail@email.com',
                'current_password': test_user_1['password'],
            },
            format='json',
        )
        self.assertEqual(res_fail.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(res_fail.data['detail'], 'User denied access.')
        log = StatusLog.objects.using('logger').latest('created_at')
        self.assertRegex(log.msg, log_msg_regex('User denied access.', LogLevels.ERROR))
        self.assertEqual(log.command, UserCommands.UPDATE)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject,
            f'{settings.EMAIL_SUBJECT_PREFIX}ERROR: User denied access.')
        self.assertListEqual(mail.outbox[0].to, ['contact@simplekanban.app'])
        self.assertEqual(StatusLog.objects.using('logger').count(), 1)
    def test_user_update_fail_empty_info(self):
        """PATCH with blank name/email returns 400 per field, and the error is logged and emailed."""
        login = self.client.post(reverse('login'), data={
            'email': test_user_1['email'],
            'password': test_user_1['password'],
        })
        user_slug = login.data['user']['user_slug']
        self.client.credentials(HTTP_AUTHORIZATION=f"Token {login.data['token']}")
        url = f'/api/users/{user_slug}/'
        patch = self.client.patch(
            url,
            data={
                'name': '',
                'email': '',
                'password': '',
                'password_2': '',
                'current_password': test_user_1['password'],
            },
            format='json',
        )
        self.assertEqual(patch.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertListEqual(patch.data['name'], ['This field may not be blank.'])
        self.assertListEqual(patch.data['email'], ['This field may not be blank.'])
        log = StatusLog.objects.using('logger').latest('created_at')
        self.assertRegex(log.msg, log_msg_regex('Blank user update.', LogLevels.ERROR))
        self.assertEqual(log.command, UserCommands.UPDATE)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject,
            f'{settings.EMAIL_SUBJECT_PREFIX}ERROR: Blank user update.')
        self.assertListEqual(mail.outbox[0].to, ['contact@simplekanban.app'])
        self.assertEqual(StatusLog.objects.using('logger').count(), 1)
def test_user_update_fail_empty_info(self):
login = self.client.post(reverse('login'), data={
'email': test_user_1['email'],
'password': test_user_1['password'],
})
user_slug = login.data['user']['user_slug']
self.client.credentials(HTTP_AUTHORIZATION=f"Token {login.data['token']}")
url = f'/api/users/{user_slug}/'
patch = self.client.patch(
url,
data={
'name': 'Bad name #$',
'email': 'bademail.com',
'password': '',
'password_2': '',
'current_password': test_user_1['password'],
},
format='json',
)
self.assertEqual(patch.status_code, status.HTTP_400_BAD_REQUEST)
self.assertListEqual(patch.data['name'], ['Please enter a valid name.'])
self.assertListEqual(patch.data['email'], ['Please enter a valid email address.'])
self.assertEqual(StatusLog.objects.using('logger').count(), 0)
    def test_successful_user_info_update(self):
        """A valid name/email PATCH returns 200 and echoes the updated values."""
        login = self.client.post(reverse('login'), data={
            'email': test_user_1['email'],
            'password': test_user_1['password'],
        })
        user_slug = login.data['user']['user_slug']
        self.client.credentials(HTTP_AUTHORIZATION=f"Token {login.data['token']}")
        url = f'/api/users/{user_slug}/'
        patch = self.client.patch(
            url,
            data={
                'name': 'New Name',
                'email': 'fake3@email.com',
                'current_password': test_user_1['password'],
            },
            format='json',
        )
        self.assertEqual(patch.status_code, status.HTTP_200_OK)
        self.assertEqual(patch.data['name'], 'New Name')
        self.assertEqual(patch.data['email'], 'fake3@email.com')
        self.assertEqual(StatusLog.objects.using('logger').count(), 0)
def test_user_can_deactivate_account(self):
login = self.client.post(reverse('login'), data={
'email': test_user_1['email'],
'password': test_user_1['password'],
})
user_slug = login.data['user']['user_slug']
self.client.credentials(HTTP_AUTHORIZATION=f"Token {login.data['token']}")
url = f'/api/users/{user_slug}/'
delete_fail = self.client.delete(
url,
data={
'current_password': 'wrong',
'email': test_user_1['email'] + 'salt',
},
format='json',
)
self.assertEqual(delete_fail.status_code, status.HTTP_400_BAD_REQUEST)
self.assertListEqual(delete_fail.data['current_password'], ['Invalid password.'])
self.assertListEqual(delete_fail.data['email'], ['Invalid email.'])
delete = self.client.delete(
url,
data={
'current_password': test_user_1['password'],
'email': test_user_1['email'],
},
format='json',
)
self.assertEqual(delete.status_code, status.HTTP_204_NO_CONTENT)
user = get_user_model().objects.get(user_slug=user_slug)
self.assertFalse(user.is_active)
patch_fail = self.client.patch(
url,
data={
'name': 'New Name',
'current_password': test_user_1['password'],
},
format='json',
)
self.assertEqual(patch_fail.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(patch_fail.data['detail'], 'Invalid token.')
self.client.credentials()
login_fail = self.client.post(reverse('login'), data={
'email': test_user_1['email'],
'password': test_user_1['password'],
})
self.assertEqual(login_fail.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(login_fail.data['detail'], (
'Failed to log in with the info provided.'
))
self.assertEqual(StatusLog.objects.using('logger').count(), 0)
def test_successful_retrieve_user(self):
login = self.client.post(reverse('login'), data={
'email': test_user_1['email'],
'password': test_user_1['password'],
})
self.client.credentials(HTTP_AUTHORIZATION=f"Token {login.data['token']}")
get = self.client.get(reverse('users'))
self.assertEqual(get.status_code, status.HTTP_200_OK)
self.assertEqual(get.data['name'], test_user_1['name'])
self.assertEqual(get.data['email'], test_user_1['email'])
self.assertEqual(get.headers['Access-Control-Expose-Headers'], 'X-Client-Ip')
self.assertEqual(get.headers['X-Client-Ip'], '127.0.0.1')
self.assertEqual(StatusLog.objects.using('logger').count(), 0)
| 43.291469
| 100
| 0.598336
| 2,053
| 18,269
| 5.116902
| 0.09206
| 0.091385
| 0.043693
| 0.048548
| 0.791052
| 0.760209
| 0.715183
| 0.704807
| 0.677582
| 0.662351
| 0
| 0.017112
| 0.261098
| 18,269
| 422
| 101
| 43.291469
| 0.761093
| 0.008046
| 0
| 0.561856
| 0
| 0
| 0.208182
| 0.051838
| 0
| 0
| 0
| 0
| 0.237113
| 1
| 0.036082
| false
| 0.190722
| 0.028351
| 0
| 0.069588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
dd0ffaba462851ab43de057493905fd61a4648ac
| 236
|
py
|
Python
|
network/__init__.py
|
prabhuteja12/model-uncertainty-for-adaptation
|
e1303ca4775d4f4a0035637c6ed03df7f16862ad
|
[
"MIT"
] | 25
|
2021-04-10T14:28:49.000Z
|
2022-03-30T03:31:22.000Z
|
network/__init__.py
|
prabhuteja12/model-uncertainty-for-adaptation
|
e1303ca4775d4f4a0035637c6ed03df7f16862ad
|
[
"MIT"
] | 4
|
2021-08-03T09:39:27.000Z
|
2022-01-15T09:21:01.000Z
|
network/__init__.py
|
prabhuteja12/model-uncertainty-for-adaptation
|
e1303ca4775d4f4a0035637c6ed03df7f16862ad
|
[
"MIT"
] | 3
|
2021-04-15T05:24:10.000Z
|
2021-09-17T08:08:53.000Z
|
#
# SPDX-FileCopyrightText: 2021 Idiap Research Institute
#
# Written by Prabhu Teja <prabhu.teja@idiap.ch>,
#
# SPDX-License-Identifier: MIT
from .models import DeeplabMulti
from .decoders import JointSegAuxDecoderModel, NoisyDecoders
| 26.222222
| 60
| 0.800847
| 27
| 236
| 7
| 0.777778
| 0.10582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019139
| 0.114407
| 236
| 9
| 60
| 26.222222
| 0.885167
| 0.54661
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
dd7eb9cc94be7085c1f5d256a10d966d96e3e1c0
| 142
|
py
|
Python
|
core/admin.py
|
KATO-Hiro/star-chart
|
c3e12e413012c2677ab7c2516a01d21c41cd2998
|
[
"MIT"
] | 1
|
2019-12-20T13:48:36.000Z
|
2019-12-20T13:48:36.000Z
|
core/admin.py
|
KATO-Hiro/star-chart
|
c3e12e413012c2677ab7c2516a01d21c41cd2998
|
[
"MIT"
] | 15
|
2019-12-15T18:00:44.000Z
|
2021-09-22T23:36:57.000Z
|
core/admin.py
|
KATO-Hiro/star-chart
|
c3e12e413012c2677ab7c2516a01d21c41cd2998
|
[
"MIT"
] | 1
|
2019-12-20T13:48:39.000Z
|
2019-12-20T13:48:39.000Z
|
from django.contrib import admin
from .models import Repository,StarHistory
admin.site.register(Repository)
admin.site.register(StarHistory)
| 23.666667
| 42
| 0.84507
| 18
| 142
| 6.666667
| 0.555556
| 0.15
| 0.283333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077465
| 142
| 5
| 43
| 28.4
| 0.916031
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
dd9397c1d5714a3fe7ce2f2788e4e0d903f55bcc
| 30,564
|
py
|
Python
|
tmp.py
|
tracyleaf/kinetics-i3d
|
cc3e850ddf33494af961d0ab2f3c0c44920b7fae
|
[
"Apache-2.0"
] | null | null | null |
tmp.py
|
tracyleaf/kinetics-i3d
|
cc3e850ddf33494af961d0ab2f3c0c44920b7fae
|
[
"Apache-2.0"
] | null | null | null |
tmp.py
|
tracyleaf/kinetics-i3d
|
cc3e850ddf33494af961d0ab2f3c0c44920b7fae
|
[
"Apache-2.0"
] | null | null | null |
# -*-coding:utf-8-*-
import os
import shutil
import tensorflow as tf
import cv2
# file = os.listdir('E:/dataset/instruments_video/kugou_mv_dataset_part_v1/CutVideo_output/06/test')
# file2 = os.listdir('E:/open Source/kinetics-i3d/kinetics-i3d/preprocess/data/flow/06/test')
#
# # for i in file:
# # if i[:-4] not in file1:
# # list2.append[i]
#
# list1 = [i[:-4] for i in file]
# list2 = [i[:-4] for i in file2]
# list3 = [i for i in list1 if i not in list2]
# print(list3)
# print(len(list3))
# list4 = ['868900865_1549_part_3', '868900865_1549_part_4', '868900865_1549_part_5', '868900865_1549_part_6', '868900865_1549_part_7', '868900865_1549_part_8', '868900865_1549_part_9', '869153222_31_part_0', '869153222_31_part_1', '869153222_31_part_10', '869153222_31_part_11', '869153222_31_part_12', '869153222_31_part_13', '869153222_31_part_14', '869153222_31_part_15', '869153222_31_part_16', '869153222_31_part_17', '869153222_31_part_18', '869153222_31_part_2', '869153222_31_part_3', '869153222_31_part_4', '869153222_31_part_5', '869153222_31_part_6', '869153222_31_part_7', '869153222_31_part_8', '869153222_31_part_9', '883903434_154_part_0', '883903434_154_part_1', '883903434_154_part_10', '883903434_154_part_2', '883903434_154_part_3', '883903434_154_part_4', '883903434_154_part_5', '883903434_154_part_6', '883903434_154_part_7', '883903434_154_part_8', '883903434_154_part_9', '89356062_3984_part_1', '89356062_3984_part_10', '89356062_3984_part_11', '89356062_3984_part_12', '89356062_3984_part_13', '89356062_3984_part_14', '89356062_3984_part_15']
# print(len(list4))
# f = open('UCF101-label.txt','a')
# for i in os.listdir('./UCF101'):
# f.write(i)
# f.close()
# f1 = open('C:/Users/aiyanye/Desktop/tmp_nan5.txt')
# f2 = open('E:/open Source/kinetics-i3d/kinetics-i3d/preprocess/data/train_test_label/train_videoImage_list_v5.txt')
# l1 = [i for i in f1.readlines()]
# l2 = [i for i in f2.readlines()]
# l3 = [i for i in l1 if i not in l2]
# print(l3)
# print(len(l3))
#
# batch = ['/02/train/24881317_114_part_3', '/03/train/24881317_19_part_1', '/04/train/987893423_1795_part_10', '/09/train/577220551_28483_part_1', '/01/train/443661357_15771_part_13', '/01/train/740066596_238_part_7', '/00/train/564129820_20060_part_4', '/04/train/496678133_19459_part_9']
# f1 = open('C:/Users/aiyanye/Desktop/tmplog7.txt')
# f2 = open('C:/Users/aiyanye/Desktop/tmp_nan.txt','wb')
# count = 0
# for i in f1.readlines():
# if 'preprocess' in i:
# tmp = i.split(' ')
#
# f2.write(tmp[0][20:-4] + ',' + tmp[1])
# print(tmp[0][20:-4])
# count += 1
# print(count)
# f1.close()
# f2.close()
# DATA_DIR = 'E:/dataset/instruments_video/kugou_mv_dataset_part_v1/CutVideo_output'
# DATA_DIR = 'E:/open Source/kinetics-i3d/kinetics-i3d/preprocess/data/nan'
# # file = os.listdir('E:/dataset/instruments_video/kugou_mv_dataset_part_v1/CutVideo_output/')
# file2 = os.listdir('E:/open Source/kinetics-i3d/kinetics-i3d/preprocess/data/nan')
# # f2 = open('preprocess/data/train_test_label/train_videoImage_list_v5.txt')
# f2 = open('C:/Users/aiyanye/Desktop/rgb-1.txt')
# list2 = [i for i in f2.readlines()]
# list1 = [i[:-4] for i in file]
# list2 = [i[:-4] for i in file2]
# filenames = ['/'+ class_fold + "/" + 'train'+ "/" + filename[:-4] + ',' + class_fold[1]+'\n' # filename + "//" + class_fold + "//" + train_or_test
# for class_fold in
# os.listdir(DATA_DIR)
# for filename in
# # tf.gfile.Glob(os.path.join(class_fold, '*'))
# os.listdir(DATA_DIR + "//" + class_fold + "//" + 'train')
# # os.listdir(FLAGS.data_dir + "//" + '00' + "//" + train_or_test)
# ]
# print(len(filenames))
# list3 = [i for i in list2 if i not in filenames ]
# print(list3)
# print(len(list3))
# print(list2[:3])
# print(len(list2))
# # f = open('C:/Users/aiyanye/Desktop/train_rgb.txt','wb')
# # for i in filenames:
# # f.write(i)
# # f.close()
# findmissing files
# def missingfiles(data_info):
# f = open(data_info)
# train_info = list()
# for line in f.readlines():
# pathlist = line.strip().split(',')
# pathdir = 'preprocess/data/rgb/'
# path = str(pathlist[0])
# videolabel = int(pathlist[1])
# file = pathdir + path + '.npy'
# if not os.path.exists(file):
# train_info.append(pathlist[0])
# f.close()
# return train_info #[filename,label]
# test_path = 'E:/dataset/instruments_video/Video_8k_dataset/label_8k/video_8k_test_list_v3.txt'
# missingfile = missingfiles(test_path)
# print(missingfile)
# print(len(missingfile))
# 提取检测模型xml文件
# src = 'E:/dataset/instruments_video/self labeling video/part2'
# target = 'E:/dataset/instruments_video/self labeling video/part2-labeled'
#
# for fold in os.listdir(src):
# srcdir = src + '/' + fold
# targetdir = target + '/' + fold
# if not tf.gfile.IsDirectory(targetdir):
# tf.gfile.MakeDirs(targetdir)
# for i in os.listdir(srcdir):
# if '.xml' not in i:
# if i[:-4] + '.xml' in os.listdir(srcdir):
# srcFile = os.path.join(srcdir, i)
# targetFile = os.path.join(targetdir, i)
# srcfile_xml = os.path.join(srcdir, i[:-4] + '.xml')
# targetFile_xml = os.path.join(targetdir, i[:-4] + '.xml')
# shutil.copy(srcFile, targetFile)
# shutil.copy(srcfile_xml, targetFile_xml)
# 输出混淆矩阵
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
def batch2array(pathlist, rgb_or_flow):
pathdir = _SAMPLE_PATHS[rgb_or_flow]
path = str(pathlist[0])
videolabel = int(pathlist[1])
file = pathdir + path + '.npy'
array = np.load(file) # (1, 15, 224, 224, 2)
return array, videolabel
def split_data(data_info):
    """Parse a train/test list file into [filename, label] entries.

    Each line of *data_info* has the form "path,label".  Returns a list
    of the split fields, in file order.

    Fix: the original opened the file without a context manager, so the
    handle leaked if parsing raised mid-loop; `with` guarantees closure.
    """
    train_info = []
    with open(data_info) as f:
        for line in f:
            info = line.strip().split(',')
            assert info[1]  # every entry must carry a non-empty label field
            train_info.append(info)
    return train_info  # [[filename, label], ...]
# index的顺序与test文件的顺序一致
# y_true = [8, 1, 6, 8, 8, 1, 0, 8, 1, 6, 1, 5, 1, 0, 8, 0, 6, 8, 8, 5, 1, 3, 8, 0, 1, 8, 0, 1, 8, 5, 0, 2, 8, 7, 8, 4, 1, 6, 0, 8, 8, 0, 3, 0, 1, 8, 3, 8, 8, 1, 0, 6, 4, 5, 5, 1, 0, 6, 1, 3, 4, 8, 5, 1, 3, 0, 0, 7, 0, 0, 8, 3, 4, 0, 8, 4, 0, 1, 3, 1, 1, 4, 1, 6, 1, 8, 5, 8, 0, 4, 1, 1, 8, 8, 1, 6, 1, 8, 6, 4, 5, 8, 6, 4, 6, 1, 3, 4, 8, 2, 1, 6, 0, 7, 8, 2, 1, 4, 5, 1, 0, 7, 7, 0, 1, 8, 6, 7, 3, 3, 8, 8, 5, 8, 8, 0, 0, 1, 0, 8, 8, 1, 2, 8, 1, 3, 0, 1, 6, 1, 1, 0, 1, 0, 8, 3, 6, 4, 8, 1, 8, 1, 3, 3, 5, 5, 1, 8, 8, 3, 1, 3, 5, 8, 1, 8, 7, 1, 0, 3, 5, 1, 8, 0, 0, 3, 5, 0, 8, 1, 6, 7, 2, 1, 6, 0, 1, 4, 1, 2, 2, 7, 1, 1, 8, 5, 8, 1, 3, 3, 3, 2, 6, 0, 5, 8, 4, 6, 8, 8, 7, 8, 1, 3, 8, 6, 0, 0, 1, 0, 0, 6, 6, 1, 6, 8, 4, 8, 0, 8, 5, 8, 8, 8, 8, 0, 8, 0, 0, 8, 3, 5, 2, 0, 6, 8, 8, 4, 5, 3, 1, 4, 8, 5, 6, 3, 4, 1, 3, 8, 3, 8, 8, 3, 0, 8, 1, 4, 3, 8, 6, 7, 6, 5, 1, 1, 8, 2, 8, 5, 8, 3, 0, 8, 3, 2, 8, 8, 8, 0, 6, 8, 8, 1, 1, 0, 4, 1, 1, 8, 6, 2, 6, 8, 1, 1, 8, 8, 2, 1, 1, 3, 6, 2, 8, 2, 1, 1, 0, 1, 0, 8, 1, 6, 0, 1, 1, 1, 4, 0, 1, 5, 8, 3, 4, 1, 8, 6, 8, 5, 0, 1, 5, 8, 8, 1, 3, 0, 6, 8, 5, 4, 0, 6, 1, 1, 3, 5, 8, 1, 8, 8, 0, 6, 0, 7, 0, 3, 0, 1, 3, 0, 0, 1, 8, 0, 3, 1, 8, 0, 7, 1, 0, 0, 0, 1, 2, 1, 8, 8, 1, 1, 5, 0, 0, 4, 1, 4, 1, 3, 6, 1, 6, 8, 0, 8, 6, 5, 6, 1, 1, 3, 0, 8, 6, 3, 0, 1, 8, 1, 7, 6, 0, 1, 3, 2, 6, 0, 7, 6, 6, 1, 0, 3, 6, 8, 8, 3, 8, 3, 0, 5, 8, 5, 0, 1, 0, 1, 0, 0, 8, 3, 7, 8, 1, 0, 0, 1, 1, 8, 2, 8, 7, 8, 8, 7, 8, 1, 1, 0, 5, 3, 4, 1, 1, 2, 6, 0, 5, 6, 3, 1, 8, 7, 0, 5, 1, 1, 5, 3, 8, 8, 1, 1, 1, 1, 8, 6, 8, 8, 8, 5, 8, 8, 6, 7, 5, 1, 3, 4, 0, 5, 8, 2, 0, 6, 0, 1, 5, 8, 1, 0, 0, 0, 8, 2, 5, 7, 1, 8, 4, 0, 0, 0, 6, 8, 3, 8, 2, 1, 3, 5, 2, 3, 7, 3, 6, 3, 4, 1, 5, 8, 3, 5, 8, 8, 8, 0, 6, 8, 1, 1, 0, 2, 6, 0, 8, 8, 5, 2, 8, 6, 2, 8, 3, 8, 7, 7, 1, 2, 8, 1, 6, 6, 1, 0, 8, 0, 3, 2, 8, 1, 1, 6, 2, 1, 6, 1, 0, 1, 8, 6, 3, 4, 0, 6, 2, 1, 5, 0, 6, 8, 4, 6, 0, 1, 8, 6, 5, 8, 1, 6, 1, 0, 3, 1, 0, 1, 8, 3, 5, 0, 1, 8, 8, 8, 0, 1, 3, 0, 8, 2, 6, 0, 7, 8, 8, 1, 3, 8, 8, 5, 
8, 0, 4, 1, 8, 6, 1, 0, 5, 7, 1, 3, 5, 8, 3, 1, 5, 8, 6, 0, 1, 6, 8, 0, 0, 4, 4, 8, 0, 8, 1, 5, 6, 8, 1, 3, 1, 8, 6, 0, 2, 8, 1, 3, 4, 5, 1, 0, 3, 8, 1, 8, 5, 5, 4, 1, 8, 5, 8, 6, 0, 3, 5, 3, 8, 1, 6, 1, 6, 0, 0, 8, 8, 8, 0, 8, 2, 1, 7, 0, 5, 1, 3, 3, 4, 8, 1, 5, 3, 0, 8, 0, 8, 7, 8, 8, 8, 0, 3, 2, 5, 6, 0, 0, 2, 0, 8, 8, 0, 8, 5, 8, 1, 1, 1, 1, 5, 8, 2, 1, 2, 4, 0, 0, 3, 5]
# y_pred = [2, 1, 6, 8, 8, 1, 0, 8, 1, 6, 1, 5, 1, 0, 8, 0, 6, 8, 0, 5, 1, 1, 8, 0, 1, 8, 0, 1, 5, 5, 0, 2, 0, 7, 8, 4, 1, 6, 0, 8, 8, 0, 3, 8, 1, 8, 3, 8, 8, 1, 0, 6, 8, 5, 5, 1, 0, 6, 1, 3, 4, 8, 5, 1, 3, 1, 0, 7, 0, 0, 8, 8, 8, 0, 8, 8, 0, 1, 3, 1, 1, 4, 1, 6, 1, 8, 5, 8, 0, 8, 1, 1, 8, 8, 1, 6, 1, 8, 6, 3, 5, 8, 6, 8, 6, 1, 8, 4, 8, 2, 1, 6, 0, 0, 8, 2, 1, 4, 5, 1, 0, 7, 7, 0, 1, 8, 6, 7, 3, 3, 5, 0, 5, 8, 5, 0, 0, 1, 0, 5, 8, 1, 2, 5, 1, 3, 0, 1, 6, 1, 1, 0, 1, 0, 8, 3, 6, 4, 8, 1, 8, 1, 3, 3, 5, 5, 1, 8, 8, 3, 1, 3, 5, 5, 1, 8, 0, 1, 0, 3, 5, 1, 8, 0, 1, 3, 5, 0, 8, 1, 6, 7, 2, 1, 6, 0, 1, 3, 1, 2, 2, 7, 1, 1, 2, 5, 8, 1, 3, 3, 3, 2, 6, 0, 5, 8, 1, 6, 8, 8, 6, 8, 1, 3, 8, 6, 0, 0, 1, 0, 0, 6, 6, 1, 6, 8, 4, 5, 0, 8, 5, 8, 8, 8, 8, 0, 8, 0, 0, 8, 3, 5, 2, 0, 6, 8, 8, 4, 5, 3, 1, 4, 3, 5, 6, 3, 8, 1, 3, 8, 3, 8, 8, 3, 8, 8, 1, 4, 3, 8, 6, 8, 6, 5, 1, 1, 8, 2, 8, 5, 8, 3, 1, 8, 3, 2, 8, 8, 5, 0, 6, 8, 8, 1, 1, 0, 1, 1, 1, 8, 6, 2, 6, 6, 1, 1, 8, 3, 2, 1, 1, 3, 6, 2, 8, 2, 1, 1, 0, 1, 0, 8, 1, 6, 0, 1, 1, 1, 4, 0, 1, 5, 8, 3, 4, 1, 8, 6, 5, 5, 0, 1, 5, 8, 8, 1, 3, 0, 6, 8, 5, 4, 0, 6, 1, 1, 3, 5, 8, 1, 6, 8, 0, 6, 0, 7, 0, 3, 0, 1, 3, 8, 0, 1, 8, 8, 3, 1, 8, 0, 7, 1, 0, 0, 0, 1, 2, 1, 8, 8, 1, 1, 5, 1, 8, 8, 1, 4, 1, 3, 6, 1, 6, 8, 0, 0, 6, 5, 6, 1, 1, 3, 0, 8, 6, 3, 0, 1, 8, 1, 7, 6, 0, 1, 3, 2, 6, 0, 7, 6, 6, 1, 0, 3, 8, 8, 8, 3, 8, 8, 0, 5, 6, 5, 0, 1, 0, 1, 0, 8, 8, 3, 7, 8, 1, 0, 0, 1, 1, 8, 2, 8, 7, 6, 8, 7, 8, 1, 1, 0, 5, 3, 4, 1, 1, 2, 6, 0, 5, 6, 8, 1, 8, 7, 0, 5, 1, 1, 5, 3, 8, 8, 1, 1, 1, 1, 8, 6, 8, 8, 8, 5, 8, 8, 6, 7, 5, 1, 3, 8, 0, 5, 8, 2, 0, 6, 0, 1, 5, 8, 1, 0, 1, 0, 8, 2, 5, 7, 1, 8, 4, 0, 0, 0, 6, 8, 3, 8, 2, 1, 3, 5, 2, 3, 7, 3, 6, 8, 4, 1, 5, 8, 3, 5, 1, 8, 8, 0, 6, 8, 1, 1, 0, 2, 6, 0, 8, 5, 5, 2, 0, 6, 2, 8, 3, 8, 7, 7, 1, 2, 8, 1, 6, 6, 1, 0, 5, 0, 3, 2, 8, 1, 1, 6, 2, 1, 6, 1, 0, 1, 8, 6, 3, 4, 0, 6, 2, 1, 5, 0, 6, 8, 4, 6, 0, 1, 8, 6, 5, 5, 1, 6, 1, 0, 3, 1, 0, 1, 8, 3, 5, 8, 1, 8, 8, 8, 0, 1, 3, 0, 8, 2, 6, 0, 7, 8, 8, 1, 3, 8, 8, 5, 
8, 0, 4, 1, 0, 6, 1, 0, 5, 7, 1, 3, 5, 8, 3, 1, 5, 8, 6, 0, 1, 6, 6, 0, 0, 4, 8, 0, 0, 8, 1, 5, 6, 8, 1, 3, 1, 8, 6, 0, 2, 8, 1, 3, 8, 5, 1, 0, 3, 8, 1, 8, 5, 5, 4, 1, 8, 5, 8, 6, 0, 3, 5, 3, 8, 1, 6, 1, 6, 0, 0, 8, 8, 8, 0, 8, 2, 1, 7, 0, 5, 1, 3, 3, 8, 8, 1, 5, 3, 0, 8, 0, 8, 7, 8, 8, 8, 0, 3, 2, 5, 6, 0, 0, 2, 0, 8, 8, 0, 8, 5, 8, 1, 1, 1, 1, 5, 8, 2, 1, 2, 4, 0, 0, 3, 5]
# y_pred_flow = [2, 1, 6, 8, 8, 1, 0, 8, 1, 6, 1, 5, 1, 0, 8, 0, 6, 8, 8, 5, 1, 1, 8, 0, 1, 8, 0, 1, 5, 5, 0, 2, 0, 7, 8, 4, 1, 6, 0, 8, 8, 0, 3, 8, 1, 8, 3, 1, 8, 1, 0, 6, 8, 5, 5, 6, 0, 6, 1, 3, 4, 8, 5, 1, 3, 5, 0, 7, 0, 0, 8, 8, 4, 0, 8, 8, 0, 1, 3, 1, 1, 4, 1, 6, 1, 8, 5, 2, 0, 8, 1, 1, 8, 8, 1, 6, 1, 4, 6, 3, 5, 8, 6, 8, 6, 1, 3, 4, 8, 2, 1, 6, 0, 6, 8, 2, 1, 4, 5, 1, 0, 7, 7, 0, 1, 8, 6, 7, 3, 3, 5, 0, 5, 8, 5, 0, 0, 1, 0, 8, 8, 1, 2, 5, 1, 3, 0, 1, 6, 1, 1, 0, 1, 8, 8, 3, 6, 4, 8, 1, 8, 1, 3, 3, 5, 5, 1, 8, 8, 3, 8, 3, 5, 5, 1, 4, 0, 1, 0, 3, 5, 1, 8, 0, 5, 3, 5, 0, 3, 1, 6, 7, 2, 1, 6, 0, 1, 3, 1, 2, 2, 7, 1, 1, 2, 5, 8, 1, 3, 3, 3, 2, 6, 0, 5, 8, 8, 6, 8, 8, 6, 8, 1, 3, 8, 6, 0, 0, 1, 8, 0, 6, 6, 1, 6, 8, 4, 5, 0, 8, 5, 8, 8, 8, 8, 0, 8, 0, 0, 8, 3, 5, 2, 0, 6, 8, 8, 4, 5, 3, 1, 4, 4, 5, 6, 3, 8, 1, 3, 8, 3, 8, 8, 3, 8, 0, 1, 4, 3, 8, 6, 6, 6, 5, 1, 1, 8, 2, 8, 5, 8, 3, 3, 8, 3, 2, 2, 8, 8, 0, 6, 8, 8, 1, 1, 0, 4, 1, 1, 8, 6, 2, 6, 6, 1, 1, 8, 3, 2, 1, 1, 3, 6, 2, 8, 2, 1, 1, 0, 1, 0, 8, 1, 6, 0, 1, 1, 1, 4, 0, 1, 5, 8, 3, 4, 1, 8, 6, 5, 5, 0, 8, 5, 8, 8, 1, 3, 0, 6, 8, 5, 4, 0, 6, 1, 1, 3, 5, 8, 1, 6, 8, 0, 6, 0, 7, 0, 4, 0, 8, 8, 8, 0, 1, 8, 8, 3, 1, 8, 0, 7, 5, 0, 0, 0, 1, 2, 1, 8, 8, 1, 1, 5, 5, 8, 8, 1, 4, 1, 3, 6, 1, 6, 2, 0, 0, 6, 5, 6, 1, 1, 3, 0, 8, 6, 3, 0, 5, 8, 1, 7, 6, 0, 1, 3, 2, 6, 0, 7, 6, 6, 1, 0, 3, 8, 8, 8, 3, 8, 3, 0, 5, 8, 5, 0, 1, 5, 1, 0, 8, 8, 3, 7, 8, 1, 0, 0, 1, 1, 8, 2, 8, 7, 8, 8, 7, 8, 1, 1, 0, 5, 3, 4, 1, 1, 2, 6, 0, 5, 6, 8, 1, 8, 7, 0, 5, 1, 1, 5, 3, 8, 8, 1, 1, 1, 1, 8, 6, 8, 8, 8, 5, 8, 8, 6, 7, 5, 1, 8, 8, 0, 5, 8, 2, 0, 6, 0, 1, 5, 8, 1, 0, 5, 0, 0, 2, 5, 7, 1, 0, 4, 0, 0, 0, 6, 8, 3, 8, 2, 1, 3, 5, 2, 3, 6, 3, 6, 8, 4, 1, 5, 8, 8, 5, 8, 8, 8, 0, 6, 8, 1, 1, 0, 2, 6, 0, 8, 5, 5, 2, 0, 6, 2, 8, 3, 8, 7, 7, 1, 2, 8, 1, 6, 6, 1, 0, 5, 0, 3, 2, 8, 1, 1, 6, 2, 1, 6, 1, 0, 1, 8, 6, 3, 4, 0, 6, 2, 1, 5, 0, 6, 8, 4, 6, 0, 1, 8, 6, 5, 8, 1, 6, 1, 0, 3, 1, 0, 1, 8, 3, 5, 0, 1, 8, 8, 8, 0, 1, 3, 0, 8, 2, 6, 5, 7, 8, 8, 1, 3, 8, 8, 
5, 2, 0, 4, 1, 8, 6, 1, 0, 5, 7, 1, 3, 5, 8, 3, 1, 5, 8, 6, 0, 1, 6, 6, 8, 0, 4, 8, 0, 0, 8, 1, 5, 6, 8, 1, 3, 1, 8, 6, 0, 2, 8, 1, 3, 8, 5, 1, 0, 3, 8, 7, 8, 5, 5, 4, 6, 8, 5, 8, 6, 0, 3, 5, 3, 8, 1, 6, 1, 6, 0, 0, 8, 8, 8, 0, 8, 2, 1, 7, 0, 5, 1, 3, 3, 8, 8, 1, 1, 3, 0, 8, 0, 8, 7, 8, 8, 8, 0, 3, 8, 5, 6, 0, 0, 2, 0, 1, 8, 0, 8, 5, 3, 1, 1, 1, 1, 5, 3, 2, 1, 2, 4, 0, 0, 3, 5]
# y_pred_rgb = [8, 1, 6, 8, 8, 1, 0, 8, 1, 6, 1, 5, 1, 0, 8, 0, 6, 8, 0, 5, 1, 1, 8, 0, 1, 8, 0, 1, 5, 5, 0, 2, 0, 7, 8, 4, 1, 8, 0, 8, 8, 0, 3, 0, 1, 8, 3, 8, 8, 1, 0, 6, 8, 8, 5, 1, 0, 6, 1, 3, 4, 8, 5, 1, 3, 1, 0, 7, 0, 0, 8, 3, 8, 0, 8, 5, 0, 1, 3, 1, 1, 4, 1, 6, 1, 8, 5, 8, 0, 5, 1, 1, 8, 8, 1, 6, 1, 8, 6, 8, 5, 8, 6, 1, 6, 1, 8, 4, 8, 2, 1, 6, 0, 8, 8, 8, 1, 4, 5, 1, 0, 7, 7, 0, 1, 8, 6, 7, 8, 8, 5, 0, 5, 3, 5, 0, 0, 1, 0, 5, 8, 1, 2, 5, 8, 3, 0, 1, 6, 1, 1, 0, 1, 0, 8, 3, 6, 8, 8, 1, 8, 1, 3, 3, 5, 5, 1, 8, 8, 3, 1, 3, 5, 5, 1, 8, 7, 1, 0, 3, 5, 1, 8, 0, 1, 3, 5, 0, 8, 1, 6, 7, 2, 1, 6, 8, 1, 8, 1, 2, 8, 7, 1, 1, 8, 5, 8, 1, 3, 3, 3, 2, 6, 0, 5, 8, 1, 6, 8, 8, 7, 8, 1, 3, 8, 6, 0, 0, 1, 0, 0, 6, 6, 1, 6, 8, 4, 5, 0, 8, 5, 8, 8, 8, 8, 0, 8, 0, 0, 8, 3, 5, 2, 0, 6, 8, 8, 4, 5, 3, 1, 4, 3, 8, 6, 3, 8, 1, 3, 8, 3, 8, 8, 3, 8, 8, 1, 4, 8, 8, 6, 7, 6, 5, 1, 1, 8, 2, 8, 5, 8, 3, 1, 8, 3, 8, 8, 8, 5, 0, 6, 8, 8, 1, 1, 8, 1, 1, 1, 8, 6, 2, 6, 6, 1, 1, 8, 3, 2, 1, 1, 3, 6, 2, 8, 2, 1, 8, 0, 1, 0, 8, 1, 6, 0, 8, 1, 1, 4, 0, 1, 5, 0, 3, 4, 1, 8, 6, 5, 5, 0, 1, 5, 8, 8, 1, 3, 0, 6, 8, 5, 4, 0, 6, 1, 8, 8, 5, 8, 1, 8, 8, 0, 6, 0, 7, 0, 3, 0, 1, 3, 8, 0, 1, 8, 8, 3, 1, 8, 0, 7, 1, 0, 0, 0, 1, 2, 1, 0, 8, 1, 1, 5, 1, 8, 8, 1, 4, 1, 3, 6, 1, 6, 8, 0, 8, 6, 5, 6, 1, 1, 3, 0, 8, 6, 3, 0, 1, 8, 1, 7, 6, 0, 1, 3, 2, 6, 0, 7, 6, 6, 1, 0, 3, 6, 8, 8, 3, 8, 8, 0, 5, 6, 5, 0, 1, 0, 1, 0, 8, 8, 3, 7, 8, 1, 0, 0, 1, 1, 8, 2, 8, 7, 6, 8, 7, 8, 1, 1, 0, 5, 3, 4, 1, 1, 2, 6, 0, 5, 6, 3, 1, 8, 7, 0, 5, 1, 1, 5, 3, 8, 8, 1, 1, 1, 1, 8, 6, 8, 3, 8, 5, 8, 8, 6, 7, 5, 1, 3, 8, 0, 8, 8, 2, 0, 6, 0, 1, 5, 8, 1, 0, 1, 0, 6, 8, 5, 7, 1, 8, 4, 0, 0, 0, 6, 8, 3, 8, 2, 1, 8, 5, 2, 3, 7, 8, 6, 3, 4, 1, 5, 8, 1, 5, 1, 8, 8, 0, 6, 8, 1, 1, 0, 2, 6, 0, 8, 5, 5, 2, 8, 6, 2, 8, 3, 8, 7, 7, 1, 2, 8, 1, 6, 6, 1, 0, 5, 0, 3, 2, 8, 1, 1, 6, 2, 1, 6, 1, 8, 1, 8, 6, 3, 4, 0, 6, 2, 1, 5, 0, 6, 8, 4, 6, 0, 1, 8, 6, 8, 5, 1, 6, 1, 0, 3, 1, 0, 1, 8, 3, 8, 8, 1, 8, 1, 8, 0, 1, 3, 0, 8, 2, 6, 0, 7, 8, 8, 1, 3, 8, 8, 
5, 8, 0, 4, 3, 0, 6, 1, 0, 5, 7, 1, 3, 5, 8, 3, 1, 5, 8, 6, 0, 1, 6, 6, 0, 0, 4, 8, 8, 0, 8, 1, 5, 6, 8, 1, 3, 1, 8, 6, 0, 2, 8, 1, 3, 8, 5, 1, 0, 1, 8, 1, 8, 5, 5, 4, 1, 8, 5, 8, 6, 0, 3, 5, 3, 8, 1, 6, 1, 6, 0, 0, 8, 8, 8, 0, 0, 2, 1, 7, 0, 5, 1, 3, 3, 8, 8, 1, 5, 3, 0, 8, 0, 1, 7, 8, 8, 8, 0, 3, 2, 5, 8, 0, 0, 2, 0, 8, 8, 0, 8, 5, 8, 1, 1, 1, 1, 5, 8, 2, 1, 4, 4, 0, 0, 3, 5]
# y_true = [8, 1, 6, 8, 8, 1, 0, 8, 1, 6, 1, 5, 1, 0, 8, 0, 6, 8, 8, 5, 1, 3, 8, 0, 1, 8, 0, 1, 8, 5, 0, 2, 8, 7, 8, 4, 1, 6, 0, 8, 8, 0, 3, 0, 1, 8, 3, 8, 8, 1, 0, 6, 4, 5, 5, 1, 0, 6, 1, 3, 4, 8, 5, 1, 3, 0, 0, 7, 0, 0, 8, 3, 4, 0, 8, 4, 0, 1, 3, 1, 1, 4, 1, 6, 1, 8, 5, 8, 0, 4, 1, 1, 8, 8, 1, 6, 1, 8, 6, 4, 5, 8, 6, 4, 6, 1, 3, 4, 8, 2, 1, 6, 0, 7, 8, 2, 1, 4, 5, 1, 0, 7, 7, 0, 1, 8, 6, 7, 3, 3, 8, 8, 5, 8, 8, 0, 0, 1, 0, 8, 8, 1, 2, 8, 1, 3, 0, 1, 6, 1, 1, 0, 1, 0, 8, 3, 6, 4, 8, 1, 8, 1, 3, 3, 5, 5, 1, 8, 8, 3, 1, 3, 5, 8, 1, 8, 7, 1, 0, 3, 5, 1, 8, 0, 0, 3, 5, 0, 8, 1, 6, 7, 2, 1, 6, 0, 1, 4, 1, 2, 2, 7, 1, 1, 8, 5, 8, 1, 3, 3, 3, 2, 6, 0, 5, 8, 4, 6, 8, 8, 7, 8, 1, 3, 8, 6, 0, 0, 1, 0, 0, 6, 6, 1, 6, 8, 4, 8, 0, 8, 5, 8, 8, 8, 8, 0, 8, 0, 0, 8, 3, 5, 2, 0, 6, 8, 8, 4, 5, 3, 1, 4, 8, 5, 6, 3, 4, 1, 3, 8, 3, 8, 8, 3, 0, 8, 1, 4, 3, 8, 6, 7, 6, 5, 1, 1, 8, 2, 8, 5, 8, 3, 0, 8, 3, 2, 8, 8, 8, 0, 6, 8, 8, 1, 1, 0, 4, 1, 1, 8, 6, 2, 6, 8, 1, 1, 8, 8, 2, 1, 1, 3, 6, 2, 8, 2, 1, 1, 0, 1, 0, 8, 1, 6, 0, 1, 1, 1, 4, 0, 1, 5, 8, 3, 4, 1, 8, 6, 8, 5, 0, 1, 5, 8, 8, 1, 3, 0, 6, 8, 5, 4, 0, 6, 1, 1, 3, 5, 8, 1, 8, 8, 0, 6, 0, 7, 0, 3, 0, 1, 3, 0, 0, 1, 8, 0, 3, 1, 8, 0, 7, 1, 0, 0, 0, 1, 2, 1, 8, 8, 1, 1, 5, 0, 0, 4, 1, 4, 1, 3, 6, 1, 6, 8, 0, 8, 6, 5, 6, 1, 1, 3, 0, 8, 6, 3, 0, 1, 8, 1, 7, 6, 0, 1, 3, 2, 6, 0, 7, 6, 6, 1, 0, 3, 6, 8, 8, 3, 8, 3, 0, 5, 8, 5, 0, 1, 0, 1, 0, 0, 8, 3, 7, 8, 1, 0, 0, 1, 1, 8, 2, 8, 7, 8, 8, 7, 8, 1, 1, 0, 5, 3, 4, 1, 1, 2, 6, 0, 5, 6, 3, 1, 8, 7, 0, 5, 1, 1, 5, 3, 8, 8, 1, 1, 1, 1, 8, 6, 8, 8, 8, 5, 8, 8, 6, 7, 5, 1, 3, 4, 0, 5, 8, 2, 0, 6, 0, 1, 5, 8, 1, 0, 0, 0, 8, 2, 5, 7, 1, 8, 4, 0, 0, 0, 6, 8, 3, 8, 2, 1, 3, 5, 2, 3, 7, 3, 6, 3, 4, 1, 5, 8, 3, 5, 8, 8, 8, 0, 6, 8, 1, 1, 0, 2, 6, 0, 8, 8, 5, 2, 8, 6, 2, 8, 3, 8, 7, 7, 1, 2, 8, 1, 6, 6, 1, 0, 8, 0, 3, 2, 8, 1, 1, 6, 2, 1, 6, 1, 0, 1, 8, 6, 3, 4, 0, 6, 2, 1, 5, 0, 6, 8, 4, 6, 0, 1, 8, 6, 5, 8, 1, 6, 1, 0, 3, 1, 0, 1, 8, 3, 5, 0, 1, 8, 8, 8, 0, 1, 3, 0, 8, 2, 6, 0, 7, 8, 8, 1, 3, 8, 8, 5, 
8, 0, 4, 1, 8, 6, 1, 0, 5, 7, 1, 3, 5, 8, 3, 1, 5, 8, 6, 0, 1, 6, 8, 0, 0, 4, 4, 8, 0, 8, 1, 5, 6, 8, 1, 3, 1, 8, 6, 0, 2, 8, 1, 3, 4, 5, 1, 0, 3, 8, 1, 8, 5, 5, 4, 1, 8, 5, 8, 6, 0, 3, 5, 3, 8, 1, 6, 1, 6, 0, 0, 8, 8, 8, 0, 8, 2, 1, 7, 0, 5, 1, 3, 3, 4, 8, 1, 5, 3, 0, 8, 0, 8, 7, 8, 8, 8, 0, 3, 2, 5, 6, 0, 0, 2, 0, 8, 8, 0, 8, 5, 8, 1, 1, 1, 1, 5, 8, 2, 1, 2, 4, 0, 0, 3, 5]
# y_pred = [2, 1, 6, 8, 8, 1, 0, 8, 1, 6, 1, 5, 1, 0, 8, 0, 8, 8, 8, 5, 1, 3, 8, 0, 1, 8, 0, 1, 5, 5, 0, 8, 0, 7, 8, 4, 1, 6, 0, 2, 8, 0, 3, 0, 1, 8, 3, 1, 8, 1, 0, 6, 8, 5, 5, 1, 0, 6, 1, 3, 4, 8, 5, 1, 3, 1, 0, 7, 0, 0, 8, 8, 4, 0, 8, 8, 0, 1, 3, 5, 1, 4, 1, 6, 1, 8, 5, 4, 0, 8, 1, 1, 8, 8, 1, 6, 1, 8, 6, 4, 5, 8, 6, 8, 6, 1, 6, 4, 8, 2, 1, 6, 0, 0, 8, 2, 1, 4, 5, 1, 0, 7, 7, 0, 1, 8, 6, 7, 8, 8, 5, 8, 5, 8, 5, 0, 0, 1, 0, 5, 8, 1, 6, 5, 1, 3, 0, 1, 6, 1, 1, 0, 1, 8, 8, 3, 6, 4, 8, 1, 8, 1, 3, 8, 5, 5, 1, 8, 8, 8, 1, 3, 5, 5, 1, 4, 0, 1, 0, 3, 5, 1, 8, 0, 3, 8, 5, 0, 8, 1, 6, 7, 3, 1, 6, 0, 1, 3, 1, 2, 4, 7, 1, 1, 2, 5, 8, 1, 3, 3, 3, 2, 6, 0, 5, 8, 4, 6, 8, 8, 0, 4, 1, 3, 4, 6, 0, 0, 1, 0, 0, 6, 6, 1, 6, 8, 4, 5, 0, 8, 5, 8, 8, 8, 8, 0, 8, 0, 0, 8, 3, 5, 2, 0, 6, 8, 8, 4, 5, 3, 1, 4, 3, 5, 6, 3, 8, 1, 3, 8, 3, 3, 8, 3, 8, 8, 1, 4, 3, 8, 6, 0, 6, 5, 1, 1, 8, 2, 8, 5, 8, 3, 1, 8, 3, 2, 8, 8, 8, 0, 6, 8, 8, 1, 1, 0, 8, 1, 1, 8, 6, 2, 6, 6, 1, 1, 8, 3, 2, 1, 1, 3, 6, 2, 8, 3, 1, 1, 0, 1, 0, 8, 1, 6, 0, 1, 1, 1, 4, 0, 1, 5, 8, 3, 4, 1, 8, 6, 5, 5, 0, 8, 5, 8, 8, 1, 3, 0, 6, 3, 5, 4, 0, 6, 1, 1, 3, 5, 8, 1, 0, 8, 0, 6, 0, 7, 0, 4, 0, 5, 8, 8, 0, 1, 8, 0, 3, 1, 8, 0, 7, 5, 0, 0, 0, 1, 2, 1, 8, 8, 1, 1, 5, 1, 8, 8, 1, 4, 1, 3, 6, 1, 6, 3, 0, 0, 6, 5, 6, 1, 1, 3, 0, 8, 6, 3, 0, 1, 8, 1, 7, 6, 0, 1, 3, 2, 6, 0, 7, 6, 6, 1, 0, 3, 8, 6, 8, 3, 8, 8, 0, 5, 8, 5, 0, 1, 0, 1, 0, 8, 8, 3, 7, 4, 1, 0, 0, 1, 1, 8, 2, 8, 7, 6, 8, 7, 8, 1, 1, 0, 5, 3, 4, 1, 1, 2, 6, 0, 5, 6, 3, 1, 8, 7, 0, 5, 1, 1, 5, 8, 8, 8, 1, 1, 1, 1, 8, 6, 8, 8, 8, 5, 8, 8, 6, 7, 5, 1, 4, 8, 0, 5, 8, 2, 0, 6, 0, 1, 5, 8, 1, 0, 1, 0, 8, 2, 5, 7, 1, 8, 4, 0, 0, 0, 6, 8, 3, 8, 2, 1, 3, 5, 2, 3, 0, 3, 6, 4, 4, 5, 5, 8, 3, 5, 8, 8, 8, 0, 0, 8, 1, 1, 0, 2, 6, 0, 8, 5, 5, 8, 0, 6, 2, 8, 3, 4, 7, 7, 1, 8, 8, 1, 6, 6, 1, 0, 5, 0, 3, 2, 8, 1, 1, 6, 2, 1, 6, 1, 0, 1, 8, 6, 3, 4, 0, 6, 2, 1, 5, 0, 6, 8, 4, 6, 0, 1, 2, 6, 5, 2, 1, 6, 1, 0, 3, 1, 0, 1, 0, 3, 5, 0, 1, 8, 8, 8, 0, 1, 3, 0, 8, 2, 6, 0, 7, 8, 8, 1, 3, 8, 8, 8, 
2, 0, 4, 1, 0, 6, 1, 0, 5, 7, 1, 3, 5, 8, 3, 1, 5, 8, 6, 0, 1, 6, 6, 8, 0, 4, 4, 0, 0, 8, 1, 5, 6, 8, 1, 3, 1, 8, 6, 0, 2, 8, 1, 3, 8, 5, 1, 0, 3, 8, 4, 8, 5, 5, 4, 6, 8, 5, 8, 6, 0, 3, 5, 3, 8, 1, 6, 1, 6, 0, 0, 8, 8, 8, 0, 8, 2, 1, 4, 0, 5, 1, 3, 3, 8, 8, 1, 5, 3, 0, 8, 0, 8, 7, 8, 8, 4, 0, 3, 2, 5, 6, 0, 0, 2, 0, 8, 8, 0, 8, 5, 8, 1, 1, 1, 1, 5, 2, 2, 1, 2, 4, 0, 0, 3, 5]
y_true = [6, 0, 13, 14, 13, 14, 3, 13, 1, 13, 14, 4, 6, 13, 3, 6, 6, 13, 6, 13, 11, 0, 14, 0, 14, 13, 10, 2, 1, 4, 14, 0, 1, 14, 6, 13, 5, 7, 14, 3, 13, 5, 0, 1, 0, 13, 14, 1, 5, 14, 14, 14, 0, 13, 0, 2, 0, 0, 13, 14, 14, 13, 0, 3, 14, 14, 14, 6, 2, 0, 5, 13, 5, 14, 13, 14, 14, 1, 2, 14, 14, 5, 13, 3, 13, 1, 13, 2, 14, 14, 3, 3, 0, 0, 3, 14, 13, 2, 14, 0, 13, 14, 2, 14, 14, 14, 6, 1, 3, 13, 6, 13, 0, 1, 13, 13, 6, 1, 1, 1, 14, 13, 14, 14, 1, 14, 14, 1, 5, 0, 3, 1, 6, 13, 1, 13, 6, 1, 0, 5, 14, 14, 13, 14, 4, 14, 13, 5, 1, 2, 14, 1, 4, 1, 14, 2, 14, 7, 0, 0, 12, 1, 3, 1, 13, 13, 14, 14, 3, 7, 6, 0, 14, 5, 7, 14, 1, 14, 1, 13, 14, 1, 6, 0, 1, 14, 1, 13, 3, 6, 1, 1, 0, 14, 0, 1, 0, 6, 1, 2, 14, 13, 1, 3, 1, 1, 14, 4, 6, 6, 13, 7, 14, 14, 4, 1, 3, 1, 2, 1, 1, 1, 13, 8, 5, 0, 14, 1, 0, 14, 5, 0, 3, 14, 14, 13, 0, 4, 13, 6, 14, 13, 13, 6, 14, 12, 0, 0, 0, 2, 14, 1, 5, 14, 6, 1, 1, 0, 14, 14, 3, 0, 13, 3, 1, 13, 13, 1, 14, 13, 0, 3, 14, 6, 6, 0, 5, 10, 3, 1, 4, 3, 5, 6, 13, 3, 14, 1, 6, 13, 7, 1, 13, 9, 5, 13, 1, 14, 0, 1, 10, 1, 0, 14, 1, 14, 14, 1, 14, 0, 13, 13, 13, 0, 13, 13, 14, 14, 13, 1, 13, 6, 6, 1, 5, 13, 1, 14, 14, 14, 14, 14, 5, 0, 10, 14, 6, 14, 14, 14, 0, 4, 14, 13, 13, 13, 0, 6, 5, 14, 3, 14, 1, 14, 2, 14, 13, 4, 7, 13, 1, 0, 14, 1, 14, 13, 14, 14, 13, 13, 14, 13, 14, 5, 13, 2, 14, 7, 6, 0, 14, 13, 4, 0, 11, 6, 1, 1, 1, 3, 2, 8, 1, 2, 1, 1, 13, 1, 13, 4, 1, 13, 14, 13, 14, 3, 13, 1, 11, 5, 0, 4, 14, 13, 13, 1, 10, 0, 0, 0, 14, 6, 14, 13, 13, 1, 3, 7, 13, 2, 7, 13, 13, 1, 14, 7, 4, 7, 1, 0, 0, 3, 3, 14, 3, 1, 6, 1, 14, 1, 3, 0, 1, 0, 14, 2, 13, 5, 14, 2, 1, 14, 6, 3, 5, 14, 3, 13, 1, 14, 1, 13, 1, 5, 6, 13, 0, 0, 2, 5, 1, 14, 4, 14, 0, 1, 7, 6, 3, 10, 6, 14, 0, 14, 14, 4, 14, 14, 13, 3, 13, 0, 0, 13, 14, 14, 14, 14, 1, 14, 1, 14, 1, 5, 14, 2, 13, 14, 1, 6, 5, 13, 13, 2, 4, 1, 14, 0, 3, 0, 14, 1, 3, 4, 14, 5, 13, 1, 13, 1, 14, 1, 13, 13, 3, 6, 0, 1, 14, 4, 13, 14, 6, 13, 13, 14, 13, 10, 10, 14, 14, 1, 13, 13, 1, 14, 14, 4, 1, 14, 0, 13, 13, 5, 0, 13, 13, 13, 
3, 14, 0, 14, 3, 0, 3, 14, 1, 14, 14, 13, 0, 0, 1, 5, 1, 3, 14, 13, 7, 13, 1, 13, 14, 14, 14, 1, 14, 6, 13, 13, 0, 13, 0, 13, 13, 3, 3, 13, 3, 0, 14, 14, 5, 13, 13, 1, 3, 11, 6, 13, 14, 14, 0, 5, 14, 6, 6, 6, 0, 0, 14, 0, 3, 13, 1, 1, 14, 13, 1, 13, 14, 5, 14, 1, 1, 14, 1, 6, 7, 13, 13, 13, 0, 13, 14, 13, 1, 13, 13, 9, 6, 4, 6, 1, 14, 14, 7, 13, 14, 13, 5, 7, 0, 1, 6, 6, 6, 1, 1, 13, 14, 1, 0, 14, 0, 0, 3, 1, 1, 3, 14, 5, 1, 13, 5, 14, 5, 5, 13, 13, 6, 1, 0, 14, 13, 13, 3, 7, 7, 14, 13, 4, 0, 13, 0, 2, 5, 1, 0, 0, 13, 14, 14, 5, 4, 5, 11, 1, 14, 3, 1, 1, 1, 13, 3, 5, 14, 1, 0, 14, 1, 1, 0, 13, 0, 14, 6, 14, 3, 7, 6, 9, 13, 4, 1, 6, 3, 14, 13, 14, 0, 6, 0, 6, 13, 1, 0, 14, 0, 14, 5, 3, 1, 0, 1, 14, 14, 7, 5, 14, 1, 7, 13, 3, 6, 5, 13, 3, 13, 0, 5, 13, 13, 13, 1, 0, 1, 13, 1, 3, 5, 4, 1, 1, 6, 1, 14, 14, 13, 1, 0, 14, 5, 3, 0, 13, 3, 0, 3, 14, 1, 14, 5, 1, 14, 0, 0, 14, 13, 2, 1, 3, 13, 1, 7, 1, 6, 6, 1, 14, 1, 13, 3, 1, 14, 1, 7, 7, 14, 14, 14, 10, 3, 14, 2, 0, 13, 14, 0, 3, 6, 4, 0, 5, 0, 14, 13, 14, 9, 13, 13, 13, 13, 14, 0, 1, 14, 13, 0, 14, 5, 5, 14, 14, 4, 2, 0, 4, 14, 13, 14, 14, 14, 5, 14, 1, 2, 1, 13, 5, 13, 1, 14, 2, 13, 13, 13, 14, 1, 0, 6, 14, 14, 1]
y_pred = [6, 0, 13, 14, 13, 14, 3, 13, 1, 13, 14, 4, 6, 13, 3, 6, 6, 13, 6, 13, 13, 0, 13, 0, 14, 13, 13, 2, 1, 12, 0, 0, 1, 14, 14, 13, 5, 7, 14, 3, 13, 5, 0, 1, 0, 13, 14, 1, 14, 14, 14, 14, 0, 13, 4, 2, 0, 0, 13, 14, 5, 13, 0, 3, 14, 14, 14, 6, 2, 0, 5, 13, 5, 14, 13, 14, 14, 1, 14, 14, 0, 5, 13, 3, 13, 1, 13, 2, 14, 14, 3, 3, 0, 0, 3, 14, 13, 2, 14, 0, 13, 14, 2, 5, 14, 14, 14, 1, 3, 13, 6, 13, 0, 1, 13, 13, 6, 1, 1, 1, 14, 13, 14, 14, 1, 14, 14, 1, 5, 0, 3, 14, 6, 13, 1, 13, 6, 1, 0, 5, 14, 14, 13, 14, 4, 14, 13, 5, 1, 2, 14, 1, 4, 1, 14, 2, 14, 7, 0, 0, 13, 1, 4, 1, 13, 13, 14, 5, 3, 14, 6, 0, 14, 5, 7, 14, 1, 14, 1, 13, 14, 1, 6, 0, 14, 14, 1, 13, 3, 6, 1, 1, 0, 5, 14, 1, 0, 6, 1, 2, 14, 13, 1, 3, 1, 14, 14, 4, 6, 6, 5, 7, 14, 14, 4, 1, 14, 1, 14, 1, 1, 1, 13, 13, 5, 0, 14, 1, 0, 14, 5, 0, 2, 14, 14, 13, 0, 14, 13, 6, 14, 13, 13, 6, 14, 13, 0, 0, 0, 2, 14, 1, 5, 14, 6, 1, 1, 0, 14, 14, 3, 0, 13, 3, 1, 13, 13, 1, 14, 13, 0, 3, 14, 6, 6, 0, 5, 13, 3, 1, 4, 3, 3, 14, 13, 3, 14, 1, 6, 13, 7, 1, 13, 14, 5, 13, 1, 14, 0, 1, 13, 1, 0, 14, 1, 14, 14, 1, 14, 0, 13, 13, 14, 0, 13, 13, 13, 14, 13, 1, 13, 6, 6, 1, 5, 13, 1, 14, 14, 14, 1, 14, 5, 14, 13, 14, 6, 14, 14, 14, 0, 4, 14, 13, 13, 13, 0, 6, 5, 13, 3, 5, 1, 14, 2, 14, 13, 4, 7, 13, 1, 0, 14, 1, 14, 13, 14, 14, 13, 13, 14, 13, 14, 5, 13, 2, 14, 14, 6, 0, 14, 13, 4, 0, 13, 6, 1, 1, 1, 3, 2, 13, 1, 2, 1, 1, 13, 1, 13, 4, 1, 13, 14, 13, 14, 3, 13, 1, 3, 3, 0, 4, 14, 3, 13, 1, 13, 0, 0, 0, 14, 6, 14, 13, 13, 1, 3, 7, 13, 2, 7, 13, 13, 1, 14, 7, 4, 0, 1, 0, 0, 0, 3, 14, 3, 1, 6, 1, 14, 14, 14, 0, 1, 0, 1, 2, 13, 5, 14, 1, 1, 14, 6, 3, 5, 14, 3, 13, 1, 14, 14, 13, 1, 5, 6, 13, 1, 0, 2, 5, 1, 14, 4, 14, 0, 1, 7, 6, 3, 13, 6, 14, 0, 14, 14, 3, 14, 14, 13, 3, 13, 0, 13, 13, 14, 14, 14, 14, 14, 2, 1, 14, 1, 5, 14, 2, 13, 13, 1, 6, 5, 13, 13, 13, 4, 1, 14, 0, 3, 0, 14, 1, 3, 3, 14, 5, 5, 1, 13, 1, 14, 1, 13, 13, 3, 6, 0, 1, 14, 4, 13, 14, 14, 13, 13, 14, 13, 13, 14, 14, 14, 1, 13, 13, 1, 14, 14, 4, 1, 14, 0, 13, 13, 5, 0, 
13, 13, 13, 3, 14, 0, 6, 3, 0, 3, 14, 5, 14, 5, 13, 0, 0, 1, 5, 1, 3, 14, 13, 7, 13, 1, 13, 14, 14, 14, 1, 14, 6, 13, 13, 0, 13, 0, 13, 13, 3, 3, 13, 3, 0, 14, 14, 5, 13, 13, 1, 3, 1, 6, 13, 14, 14, 0, 5, 14, 6, 6, 6, 0, 0, 14, 3, 3, 13, 1, 1, 14, 13, 1, 13, 14, 5, 14, 1, 1, 14, 5, 6, 7, 13, 5, 13, 0, 13, 14, 13, 1, 13, 13, 13, 6, 3, 6, 1, 14, 14, 7, 13, 14, 3, 5, 7, 0, 1, 6, 6, 6, 1, 1, 13, 14, 1, 0, 14, 0, 0, 3, 1, 1, 3, 14, 5, 5, 13, 14, 14, 5, 14, 13, 13, 6, 14, 0, 14, 13, 13, 3, 7, 7, 14, 13, 4, 0, 13, 0, 2, 5, 1, 0, 0, 13, 14, 14, 5, 4, 5, 13, 1, 14, 3, 1, 1, 1, 13, 3, 5, 14, 1, 0, 14, 1, 1, 0, 13, 13, 14, 14, 14, 3, 0, 6, 13, 13, 4, 1, 6, 3, 14, 13, 14, 0, 6, 0, 6, 13, 1, 0, 14, 0, 14, 5, 3, 1, 0, 1, 14, 14, 7, 5, 14, 1, 7, 13, 3, 6, 5, 14, 3, 13, 0, 3, 13, 13, 13, 1, 0, 1, 13, 1, 3, 5, 4, 1, 1, 6, 1, 14, 14, 13, 1, 0, 0, 3, 3, 0, 13, 3, 0, 3, 14, 1, 6, 13, 1, 14, 0, 0, 14, 13, 2, 1, 3, 13, 1, 7, 1, 6, 6, 1, 14, 1, 13, 3, 1, 14, 1, 7, 7, 14, 14, 14, 13, 3, 14, 2, 0, 13, 14, 0, 3, 6, 4, 0, 5, 13, 14, 13, 14, 14, 13, 13, 13, 13, 0, 0, 1, 14, 13, 0, 14, 5, 5, 14, 14, 4, 2, 0, 4, 5, 13, 14, 14, 14, 5, 14, 1, 2, 1, 13, 5, 13, 1, 14, 2, 13, 13, 13, 14, 1, 0, 6, 14, 14, 1]
# Root directories holding the pre-extracted .npy sample arrays, keyed by stream type.
_SAMPLE_PATHS = {
    'rgb': 'preprocess/data/rgb/',  # alt: 'E:/dataset/instruments_video/UCF-101/', '24881317_23_part_6.npy', './24881317_23_part_6_rgb.npy'
    'flow': 'preprocess/data/flow/',  # alt: 'E:/dataset/instruments_video/UCF-101/', 'preprocess/data/flow/24881317_23_part_6.npy', 'v_BabyCrawling_g06_c05.npy'
}
# Directory where misclassified clips are exported as .avi (see commented-out loop below).
error_path = 'preprocess/data/error'
# Test list file: one "path,label" entry per line.
test_path = 'E:/dataset/instruments_video/Video_9k_dataset_v3/label_9k/video_9k_test_list_v2.txt'
# Parsed entries, presumably in the same order as y_true/y_pred above — TODO confirm.
testpathlist = split_data(test_path)
# for i in range(len(y_pred)):
# if y_true[i] != y_pred[i]:
# pathlist = testpathlist[i]
# path = str(pathlist[0])
# videoarray, videolabel = batch2array(pathlist, 'rgb')
# # windowtitle = '_label'+ str(videolabel)+ '_predict' + str(y_pred[i]) + '_rgb' + str(y_pred_rgb[i]) + '_flow' + str(y_pred_flow[i]) #path +
# windowtitle = '_label' + str(videolabel) + '_predict' + str(y_pred[i])
# cv2.namedWindow(windowtitle, cv2.WINDOW_NORMAL)
# cv2.resizeWindow(windowtitle, 600, 600)
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
# savepath = os.path.join(error_path, (path + windowtitle).split('/')[-1] + '.avi')
# print(savepath)
# out = cv2.VideoWriter(savepath, fourcc, 5, (224, 224))
# for j in range(15):
# # 定义解码器并创建VideoWrite对象
# # linux: XVID、X264; windows:DIVX
# # 20.0指定一分钟的帧数
# # 写入帧
# frame = videoarray[0][j]
# frame = np.array((frame + 1)/2*255,dtype= np.uint8)
# out.write(frame)
# cv2.imshow(windowtitle, frame)
#
# # cv2.waitKey(300)
# if cv2.waitKey(10) & 0xFF == ord('q'): # 适当调整等待时间
# continue
# out.release()
#
# cv2.destroyAllWindows()
# Class display names (Chinese), index-aligned with the integer labels in y_true/y_pred:
# piano, guitar, sax, flute, hulusi, drum kit, guzheng, erhu, pipa, suona,
# clarinet, violin, xun, dancing, non-instrument.
labels = ['钢琴','吉他','萨克斯','笛子','葫芦丝','架子鼓','古筝','二胡','琵琶','唢呐','单簧管','小提琴','埙','跳舞','非乐器']
print(confusion_matrix(
    y_true,  # array, Ground truth (correct) target values
    y_pred,  # array, Estimated targets as returned by a classifier
    labels=None,  # array, List of labels to index the matrix.
    sample_weight=None  # array-like of shape = [n_samples], Optional sample weights
))
# Minor-tick positions at the cell boundaries (used below to draw the grid lines).
tick_marks = np.array(range(len(labels))) + 0.5
plt.rcParams['font.sans-serif']=['SimHei']  # use SimHei so the Chinese tick labels render correctly
fontsize = 12
def plot_confusion_matrix(cm, title='Confusion Matrix', cmap=plt.cm.Blues):  # plt.cm.binary
    """Render *cm* as a heatmap on the current figure.

    Uses the module-level `labels` list for both axes and the
    module-level `fontsize` for the axis titles.  Draws onto the
    current pyplot figure; call plt.show() afterwards.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title, fontsize=22)
    plt.colorbar()
    tick_positions = np.arange(len(labels))
    plt.xticks(tick_positions, labels, rotation=90, fontsize=14)
    plt.yticks(tick_positions, labels, fontsize=14)
    plt.ylabel('真实类别', fontsize=fontsize)
    plt.xlabel('预测类别', fontsize=fontsize)
# Build the raw-count confusion matrix (rows = true class, cols = predicted class)
# and draw it as an annotated heatmap.
cm = confusion_matrix(y_true, y_pred)
# np.set_printoptions(precision=2)
# cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# print(cm_normalized)
plt.figure(figsize=(12, 8), dpi=120)
ind_array = np.arange(len(labels))
# Cartesian product of (col, row) cell coordinates, for per-cell annotation.
x, y = np.meshgrid(ind_array, ind_array)
for x_val, y_val in zip(x.flatten(), y.flatten()):
    # c = cm_normalized[y_val][x_val]
    c = cm[y_val][x_val]  # count of true class y_val predicted as x_val
    # if c > 0.01:
    if c>1:  # annotate only cells with more than one sample, to reduce clutter
        # plt.text(x_val, y_val, "%.1f%%" % (c*100,), color='red', fontsize=fontsize, va='center', ha='center')
        plt.text(x_val, y_val, c, color='red', fontsize=fontsize, va='center', ha='center')
# offset the tick: place minor ticks at cell boundaries so the grid runs between cells
plt.gca().set_xticks(tick_marks, minor=True)
plt.gca().set_yticks(tick_marks, minor=True)
plt.gca().xaxis.set_ticks_position('none')
plt.gca().yaxis.set_ticks_position('none')
plt.grid(True, which='minor', linestyle='-')
plt.gcf().subplots_adjust(bottom=0.15)
plot_confusion_matrix(cm, title='乐器识别混淆矩阵')  # cm_normalized
# show confusion matrix
# plt.savefig('../Data/confusion_matrix.png', format='png')
plt.show()
| 127.882845
| 3,189
| 0.455929
| 8,030
| 30,564
| 1.693649
| 0.043337
| 0.036912
| 0.014118
| 0.009118
| 0.649191
| 0.597353
| 0.568309
| 0.542353
| 0.519191
| 0.498824
| 0
| 0.374887
| 0.273263
| 30,564
| 239
| 3,190
| 127.882845
| 0.237394
| 0.705503
| 0
| 0
| 0
| 0
| 0.031877
| 0.01418
| 0
| 0
| 0
| 0
| 0.014925
| 1
| 0.044776
| false
| 0
| 0.104478
| 0
| 0.179104
| 0.014925
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
dd96f48aa0847d138b76390de9fd90d72efb3de5
| 28
|
py
|
Python
|
bulletin_board_bot/services/base_service.py
|
t3m8ch/bulletin-board-bot
|
c76dd041fdfc6de55f96cd88bc7cf16a2aae30a6
|
[
"MIT"
] | null | null | null |
bulletin_board_bot/services/base_service.py
|
t3m8ch/bulletin-board-bot
|
c76dd041fdfc6de55f96cd88bc7cf16a2aae30a6
|
[
"MIT"
] | null | null | null |
bulletin_board_bot/services/base_service.py
|
t3m8ch/bulletin-board-bot
|
c76dd041fdfc6de55f96cd88bc7cf16a2aae30a6
|
[
"MIT"
] | null | null | null |
class BaseService:
pass
| 9.333333
| 18
| 0.714286
| 3
| 28
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 28
| 2
| 19
| 14
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
06b8327ac3b21725c512b4704ace89fe152517e1
| 56
|
py
|
Python
|
path_checker/__init__.py
|
bgyori/path_checker
|
5a1f64795b0dd19357ab117b0d71fe3aa0003baa
|
[
"BSD-2-Clause"
] | null | null | null |
path_checker/__init__.py
|
bgyori/path_checker
|
5a1f64795b0dd19357ab117b0d71fe3aa0003baa
|
[
"BSD-2-Clause"
] | null | null | null |
path_checker/__init__.py
|
bgyori/path_checker
|
5a1f64795b0dd19357ab117b0d71fe3aa0003baa
|
[
"BSD-2-Clause"
] | null | null | null |
from .path_checker import PathChecker, HypothesisTester
| 28
| 55
| 0.875
| 6
| 56
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089286
| 56
| 1
| 56
| 56
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
06f8b1acfb270d8ac5addc4f10495d0d4a1f2f5b
| 256
|
py
|
Python
|
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/hr_timesheet_sheet/models/__init__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | 1
|
2019-12-19T01:53:13.000Z
|
2019-12-19T01:53:13.000Z
|
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/hr_timesheet_sheet/models/__init__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/hr_timesheet_sheet/models/__init__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import account_analytic_line
import hr_department
import hr_employee
import hr_timesheet_sheet
import hr_timesheet_sheet_config_settings
import res_company
| 25.6
| 74
| 0.832031
| 38
| 256
| 5.315789
| 0.736842
| 0.158416
| 0.168317
| 0.217822
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004444
| 0.121094
| 256
| 9
| 75
| 28.444444
| 0.893333
| 0.367188
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
66034992bbc0a9bc4e39cc02e53a3ad626ffe3ca
| 36
|
py
|
Python
|
piwars/core/exc.py
|
westpark/robotics
|
62546d0b2235b9ab73ec7968e2167f516a664c58
|
[
"MIT"
] | null | null | null |
piwars/core/exc.py
|
westpark/robotics
|
62546d0b2235b9ab73ec7968e2167f516a664c58
|
[
"MIT"
] | null | null | null |
piwars/core/exc.py
|
westpark/robotics
|
62546d0b2235b9ab73ec7968e2167f516a664c58
|
[
"MIT"
] | null | null | null |
class PiWarsError(Exception): pass
| 18
| 35
| 0.805556
| 4
| 36
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
6635388264863ffa022c3bee3657b0fa2bef164a
| 109
|
py
|
Python
|
tests/__init__.py
|
actus10/chameleon
|
c35f04738d2475d2db73643978f0f7e71b5a9936
|
[
"Apache-2.0"
] | 2
|
2021-01-22T21:07:52.000Z
|
2021-02-10T15:05:56.000Z
|
server/__init__.py
|
actus10/chameleon
|
c35f04738d2475d2db73643978f0f7e71b5a9936
|
[
"Apache-2.0"
] | 1
|
2021-04-30T20:59:53.000Z
|
2021-04-30T20:59:53.000Z
|
server/__init__.py
|
actus10/chameleon
|
c35f04738d2475d2db73643978f0f7e71b5a9936
|
[
"Apache-2.0"
] | 1
|
2021-09-05T02:18:57.000Z
|
2021-09-05T02:18:57.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017, A10 Networks
# Author: Mike Thompson: @mike @t @a10@networks!com
#
| 27.25
| 51
| 0.651376
| 15
| 109
| 4.733333
| 0.8
| 0.309859
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098901
| 0.165138
| 109
| 4
| 52
| 27.25
| 0.681319
| 0.917431
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b078f266bce7d2fa32ce8252bbd02d653453f27e
| 716
|
py
|
Python
|
sdk/python/pulumi_google_native/retail/__init__.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 44
|
2021-04-18T23:00:48.000Z
|
2022-02-14T17:43:15.000Z
|
sdk/python/pulumi_google_native/retail/__init__.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 354
|
2021-04-16T16:48:39.000Z
|
2022-03-31T17:16:39.000Z
|
sdk/python/pulumi_google_native/retail/__init__.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 8
|
2021-04-24T17:46:51.000Z
|
2022-01-05T10:40:21.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Make subpackages available:
if typing.TYPE_CHECKING:
import pulumi_google_native.retail.v2 as __v2
v2 = __v2
import pulumi_google_native.retail.v2alpha as __v2alpha
v2alpha = __v2alpha
import pulumi_google_native.retail.v2beta as __v2beta
v2beta = __v2beta
else:
v2 = _utilities.lazy_import('pulumi_google_native.retail.v2')
v2alpha = _utilities.lazy_import('pulumi_google_native.retail.v2alpha')
v2beta = _utilities.lazy_import('pulumi_google_native.retail.v2beta')
| 34.095238
| 80
| 0.755587
| 98
| 716
| 5.193878
| 0.44898
| 0.141454
| 0.212181
| 0.282908
| 0.489195
| 0.489195
| 0.253438
| 0
| 0
| 0
| 0
| 0.031561
| 0.159218
| 716
| 20
| 81
| 35.8
| 0.813953
| 0.263966
| 0
| 0
| 1
| 0
| 0.190019
| 0.190019
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.615385
| 0
| 0.615385
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b092fc42a4601a4c861467201cf2b293480246ea
| 29
|
py
|
Python
|
DataCurator-master-aa22cabe11ff989d4484434df222794be5015913/src/__init__.py
|
atanikan/Data-Curator
|
0f23b77b5cc9c2364308bd828d7d2ce290e06778
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
DataCurator-master-aa22cabe11ff989d4484434df222794be5015913/src/__init__.py
|
atanikan/Data-Curator
|
0f23b77b5cc9c2364308bd828d7d2ce290e06778
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
DataCurator-master-aa22cabe11ff989d4484434df222794be5015913/src/__init__.py
|
atanikan/Data-Curator
|
0f23b77b5cc9c2364308bd828d7d2ce290e06778
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
#__all__ = [ "DataCurator" ]
| 14.5
| 28
| 0.62069
| 2
| 29
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 29
| 1
| 29
| 29
| 0.583333
| 0.931034
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b0b4472567948752792533b8b2c331f86d3a2253
| 6,666
|
py
|
Python
|
test/unit_tests/commons/configurator_test.py
|
btc-ag/revengtools
|
d58680ef7d6bdc8ef518860d5d13a5acc0d01758
|
[
"Apache-2.0"
] | 2
|
2019-07-15T14:59:59.000Z
|
2022-01-18T14:23:54.000Z
|
test/unit_tests/commons/configurator_test.py
|
btc-ag/revengtools
|
d58680ef7d6bdc8ef518860d5d13a5acc0d01758
|
[
"Apache-2.0"
] | 10
|
2018-05-03T13:25:07.000Z
|
2021-06-25T15:14:55.000Z
|
test/unit_tests/commons/configurator_test.py
|
btc-ag/revengtools
|
d58680ef7d6bdc8ef518860d5d13a5acc0d01758
|
[
"Apache-2.0"
] | 1
|
2018-05-02T13:59:27.000Z
|
2018-05-02T13:59:27.000Z
|
"""
Created on 29.09.2012
@author: SIGIESEC
"""
from commons.configurator import InstanceConfigurator
from test.unit_tests.commons.testdata.configurator_testdata import (
_OneImplementation, _OneImplementationAdditionalParam, _SecondInterface,
_ThirdImplementation, _FourthImplementation, _OtherImplementation)
import unittest
class InstanceConfiguratorTest(unittest.TestCase):
    """Unit tests for InstanceConfigurator: adapter lookup, instance and
    factory creation (with/without extra parameters), and required-adapter
    discovery, driven by the testdata configuration tuples.

    Changes vs. previous revision: the deprecated ``assertEquals`` alias
    (removed in Python 3.12) is replaced by ``assertEqual``, and the eight
    byte-identical configuration literals are built by two helpers.
    """

    # Module path hosting all the test interfaces/implementations.
    _TESTDATA = "test.unit_tests.commons.testdata.configurator_testdata"

    @classmethod
    def _single_config(cls):
        # Configuration binding only _OtherInterface -> _OtherImplementation.
        return (((cls._TESTDATA, "_OtherInterface"),
                 ((cls._TESTDATA, "_OtherImplementation"), dict())),)

    @classmethod
    def _double_config(cls):
        # Configuration binding _OtherInterface and _SecondInterface.
        return (((cls._TESTDATA, "_OtherInterface"),
                 ((cls._TESTDATA, "_OtherImplementation"), dict())),
                ((cls._TESTDATA, "_SecondInterface"),
                 ((cls._TESTDATA, "_SecondImplementation"), dict())),
                )

    def test_get_concrete_adapter(self):
        configurator = InstanceConfigurator(self._double_config())
        my_object = configurator.get_concrete_adapter(_SecondInterface())
        self.assertEqual("test", my_object.second_method("test"))

    def test_create_instance_no_additional_param(self):
        configurator = InstanceConfigurator(self._single_config())
        my_object = configurator.create_instance(_OneImplementation)
        self.assertEqual("test", my_object.my_method("test"))

    def test_create_factory_no_additional_param(self):
        configurator = InstanceConfigurator(self._single_config())
        my_object = configurator.create_factory(_OneImplementation)()
        self.assertEqual("test", my_object.my_method("test"))

    def test_create_instance_additional_param(self):
        configurator = InstanceConfigurator(self._single_config())
        my_object = configurator.create_instance(_OneImplementationAdditionalParam, param="x")
        self.assertEqual("testx", my_object.my_method("test"))

    def test_create_factory_additional_param_prebound(self):
        # Extra parameter bound at factory-creation time.
        configurator = InstanceConfigurator(self._single_config())
        my_object = configurator.create_factory(_OneImplementationAdditionalParam, param="x")()
        self.assertEqual("testx", my_object.my_method("test"))

    def test_create_factory_additional_param(self):
        # Extra parameter passed at factory-call time, as a keyword.
        configurator = InstanceConfigurator(self._single_config())
        my_object = configurator.create_factory(_OneImplementationAdditionalParam)(param="x")
        self.assertEqual("testx", my_object.my_method("test"))

    def test_create_factory_additional_param_positional(self):
        # Extra parameter passed at factory-call time, positionally.
        configurator = InstanceConfigurator(self._single_config())
        my_object = configurator.create_factory(_OneImplementationAdditionalParam)("x")
        self.assertEqual("testx", my_object.my_method("test"))

    def test_create_factory_missing_additional_param(self):
        configurator = InstanceConfigurator(self._single_config())
        # Calling the returned factory without the required extra parameter
        # must raise TypeError.
        self.assertRaises(TypeError, configurator.create_factory(_OneImplementationAdditionalParam))
        # TODO check for string: __init__() takes at least 2 non-keyword arguments (1 given)

    def test_create_instance_indirect_config_dependent(self):
        configurator = InstanceConfigurator(self._single_config())
        my_object = configurator.create_factory(_ThirdImplementation)()
        self.assertEqual("third", my_object.third_method())

    def test_create_instance_indirect_object_factory(self):
        configurator = InstanceConfigurator(self._single_config())
        my_object = configurator.create_instance(_FourthImplementation)
        self.assertEqual("fourth", my_object.third_method())

    # TODO add test case where an instance of a ConfigDependent class is injected
    # TODO add failure test cases

    def test_get_required_concrete_adapters(self):
        configurator = InstanceConfigurator(self._double_config())
        actualRequired = frozenset(configurator.get_required_concrete_adapters(_OneImplementation))
        expectedRequired = frozenset([_OtherImplementation])
        self.assertEqual(expectedRequired,
                         actualRequired)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 62.886792
| 121
| 0.706871
| 578
| 6,666
| 7.804498
| 0.16263
| 0.047883
| 0.07781
| 0.119707
| 0.780758
| 0.761694
| 0.753048
| 0.737974
| 0.737974
| 0.736422
| 0
| 0.001836
| 0.182868
| 6,666
| 105
| 122
| 63.485714
| 0.826326
| 0.040654
| 0
| 0.530864
| 0
| 0
| 0.304825
| 0.226504
| 0
| 0
| 0
| 0.009524
| 0.135802
| 1
| 0.135802
| false
| 0
| 0.037037
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b0d82c256c202e14c381b78f0c69f7630b67303b
| 136
|
py
|
Python
|
python/hackerRank/angryProfessor/angry_professor.test.py
|
beasleyDOTcom/data-structures-and-algorithms
|
2915ed9d9ad5f2e8c983d0711c9e2b52e0ed14ff
|
[
"MIT"
] | 1
|
2021-09-01T01:39:22.000Z
|
2021-09-01T01:39:22.000Z
|
python/hackerRank/angryProfessor/angry_professor.test.py
|
beasleyDOTcom/data-structures-and-algorithms
|
2915ed9d9ad5f2e8c983d0711c9e2b52e0ed14ff
|
[
"MIT"
] | 47
|
2020-07-13T21:56:44.000Z
|
2021-03-06T03:53:25.000Z
|
python/hackerRank/angryProfessor/angry_professor.test.py
|
beasleyDOTcom/data-structures-and-algorithms
|
2915ed9d9ad5f2e8c983d0711c9e2b52e0ed14ff
|
[
"MIT"
] | null | null | null |
# Smoke test for the angry_professor HackerRank solution.
#
# Previous revision was broken two ways: `angry_professor(3, ...)` called the
# *module* object (TypeError), and the assert referenced an unbound name
# `module`. Aliasing the import as `module` fixes both while keeping the
# assert line's intent: the callable is `angryProfessor` inside the module.
import angry_professor as module

print(module.angryProfessor(3, [-2, -1, 0, 1, 2]))
# NOTE(review): expected value 'YES' kept from the original assertion —
# confirm against the solution's cancellation rule (threshold vs. on-time
# arrivals <= 0) once angry_professor is importable.
assert module.angryProfessor(3, [-2, -1, 0, 1, 2]) == 'YES', "should be YES"
| 45.333333
| 72
| 0.683824
| 25
| 136
| 3.64
| 0.56
| 0.307692
| 0.065934
| 0.087912
| 0.131868
| 0.131868
| 0
| 0
| 0
| 0
| 0
| 0.096
| 0.080882
| 136
| 3
| 72
| 45.333333
| 0.632
| 0
| 0
| 0
| 0
| 0
| 0.116788
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
7c0d1d7fcf5396d3a386c9b7bbb72cf202ae5e26
| 49
|
py
|
Python
|
serving/fn/test_func.py
|
dmarcus-wire/sepsisDetection
|
2eceb96c4d8ff632bf191ec58927f59f84ca5916
|
[
"CC-BY-4.0"
] | null | null | null |
serving/fn/test_func.py
|
dmarcus-wire/sepsisDetection
|
2eceb96c4d8ff632bf191ec58927f59f84ca5916
|
[
"CC-BY-4.0"
] | null | null | null |
serving/fn/test_func.py
|
dmarcus-wire/sepsisDetection
|
2eceb96c4d8ff632bf191ec58927f59f84ca5916
|
[
"CC-BY-4.0"
] | null | null | null |
# simple unit test to test functionality locally
| 24.5
| 48
| 0.816327
| 7
| 49
| 5.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163265
| 49
| 1
| 49
| 49
| 0.97561
| 0.938776
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b0320484b12258dc622c833be09af402a834064f
| 7,469
|
py
|
Python
|
hyperbolic-rs/tests/test_loss.py
|
LeoLaugier/recommendation-rudders
|
159d9433885cfe8e324709a51dccde2243db1182
|
[
"Apache-2.0"
] | 9
|
2020-05-04T08:12:50.000Z
|
2021-06-06T20:37:53.000Z
|
hyperbolic-rs/tests/test_loss.py
|
LeoLaugier/recommendation-rudders
|
159d9433885cfe8e324709a51dccde2243db1182
|
[
"Apache-2.0"
] | 4
|
2020-07-02T11:17:48.000Z
|
2021-06-25T12:21:14.000Z
|
hyperbolic-rs/tests/test_loss.py
|
LeoLaugier/recommendation-rudders
|
159d9433885cfe8e324709a51dccde2243db1182
|
[
"Apache-2.0"
] | 5
|
2020-10-01T19:58:29.000Z
|
2021-07-16T02:59:23.000Z
|
# Copyright 2017 The Rudders Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from collections import namedtuple
from unittest.mock import MagicMock
from rudders.models import TransE
from rudders.utils import set_seed
from rudders.losses import BCELoss, HingeLoss
def get_flags(initializer='RandomUniform', regularizer='l2', dims=32, neg_sample_size=1,
              entity_reg=0, relation_reg=0, batch_size=10, hinge_margin=1):
    """Build a lightweight stand-in for the command-line FLAGS object.

    Returns a ``Flags`` namedtuple exposing exactly the hyper-parameter
    fields the models and losses read, populated from the keyword arguments.
    """
    field_names = ('initializer', 'regularizer', 'dims', 'neg_sample_size',
                   'entity_reg', 'relation_reg', 'batch_size', 'hinge_margin')
    Flags = namedtuple("Flags", list(field_names))
    # Field order above matches the argument order, so positional fill is safe.
    values = (initializer, regularizer, dims, neg_sample_size,
              entity_reg, relation_reg, batch_size, hinge_margin)
    return Flags(*values)
class TestLoss(tf.test.TestCase):
    """Tests BCELoss/HingeLoss by mocking the model's scoring call so the
    losses see controlled positive/negative scores.

    Changes vs. previous revision: the seven test methods each duplicated
    the same mock/score/loss scaffolding; it now lives in one private
    helper. Test method names (the interface the unittest runner discovers)
    are unchanged.
    """

    def setUp(self):
        super().setUp()
        set_seed(42, set_tf_seed=True)
        self.dtype = tf.float64
        tf.keras.backend.set_floatx("float64")
        self.flags = get_flags()
        self.n_users = 2
        self.n_items = 2
        self.n_relations = 1
        self.item_ids = [0, 1]

    def get_model(self, n_users, n_items):
        return TransE(n_users + n_items, self.n_relations, self.item_ids, self.flags)

    def _loss_for_scores(self, loss_cls, score_pos, score_neg):
        """Compute loss_cls.calculate_loss with model.call mocked to return
        score_pos then score_neg, and return the scalar loss as a float."""
        def effect(*args, **kwargs):
            # first call inside loss is with positive samples, second with negative
            yield tf.convert_to_tensor([score_pos], dtype=self.dtype)
            yield tf.convert_to_tensor([score_neg], dtype=self.dtype)
        model = self.get_model(self.n_users, self.n_items)
        model.call = MagicMock(side_effect=effect())
        input_batch = tf.convert_to_tensor([[0, 1]], dtype=tf.int64)
        loss = loss_cls(ini_neg_index=0, end_neg_index=self.n_users + self.n_items - 1,
                        args=self.flags)
        result = loss.calculate_loss(model, input_batch)
        return result.numpy().item()

    def test_positive_sample_with_high_score_and_negative_sample_with_low_score_result_low_bce_loss(self):
        self.assertLess(self._loss_for_scores(BCELoss, 50, -50), 0.0001)

    def test_positive_sample_with_low_score_and_negative_sample_with_high_score_result_high_bce_loss(self):
        self.assertGreater(self._loss_for_scores(BCELoss, -50, 50), 10)

    def test_positive_sample_with_high_score_and_negative_sample_with_low_score_result_low_hinge_loss(self):
        self.assertLess(self._loss_for_scores(HingeLoss, 50, -50), 0.0001)

    def test_positive_sample_with_low_score_and_negative_sample_with_high_score_result_high_hinge_loss(self):
        self.assertGreater(self._loss_for_scores(HingeLoss, -50, 50), 10)

    def test_zero_score_result_bce_loss(self):
        self.assertGreater(self._loss_for_scores(BCELoss, 0, 0), 0.5)

    def test_equal_positive_score_results_high_bce_loss(self):
        self.assertGreater(self._loss_for_scores(BCELoss, 5, 5), 1)

    def test_equal_negative_score_results_high_bce_loss(self):
        self.assertGreater(self._loss_for_scores(BCELoss, -5, -5), 1)
| 41.494444
| 119
| 0.680011
| 1,066
| 7,469
| 4.5
| 0.148218
| 0.034397
| 0.048155
| 0.074422
| 0.741714
| 0.715864
| 0.715864
| 0.715864
| 0.715864
| 0.715864
| 0
| 0.01882
| 0.217432
| 7,469
| 179
| 120
| 41.726257
| 0.801882
| 0.140313
| 0
| 0.598291
| 0
| 0
| 0.0175
| 0
| 0
| 0
| 0
| 0
| 0.059829
| 1
| 0.145299
| false
| 0
| 0.051282
| 0.008547
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b03784d3aea1ae6d7bed58cc0308333a3810e6e5
| 4,221
|
py
|
Python
|
plugins/module_utils/definitions/event_series.py
|
robertcsapo/dnacenter-ansible
|
33f776f8c0bc7113da73191c301dd1807e6b4a43
|
[
"MIT"
] | null | null | null |
plugins/module_utils/definitions/event_series.py
|
robertcsapo/dnacenter-ansible
|
33f776f8c0bc7113da73191c301dd1807e6b4a43
|
[
"MIT"
] | null | null | null |
plugins/module_utils/definitions/event_series.py
|
robertcsapo/dnacenter-ansible
|
33f776f8c0bc7113da73191c301dd1807e6b4a43
|
[
"MIT"
] | null | null | null |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
module_definition = json.loads(
"""{
"family": "event_management",
"name": "event_series",
"operations": {
"get": [
"get_notifications",
"count_of_notifications"
]
},
"parameters": {
"count_of_notifications": [
{
"name": "category",
"required": false,
"type": "string"
},
{
"name": "domain",
"required": false,
"type": "string"
},
{
"name": "end_time",
"required": false,
"type": "string"
},
{
"name": "event_ids",
"required": false,
"type": "string"
},
{
"name": "severity",
"required": false,
"type": "string"
},
{
"name": "source",
"required": false,
"type": "string"
},
{
"name": "start_time",
"required": false,
"type": "string"
},
{
"name": "sub_domain",
"required": false,
"type": "string"
},
{
"name": "type",
"required": false,
"type": "string"
},
{
"artificial": true,
"name": "count",
"required": true,
"type": "boolean"
}
],
"get_notifications": [
{
"name": "category",
"required": false,
"type": "string"
},
{
"name": "domain",
"required": false,
"type": "string"
},
{
"name": "end_time",
"required": false,
"type": "string"
},
{
"name": "event_ids",
"required": false,
"type": "string"
},
{
"name": "limit",
"required": false,
"type": "number"
},
{
"name": "offset",
"required": false,
"type": "number"
},
{
"name": "order",
"required": false,
"type": "string"
},
{
"name": "severity",
"required": false,
"type": "string"
},
{
"name": "sort_by",
"required": false,
"type": "string"
},
{
"name": "source",
"required": false,
"type": "string"
},
{
"name": "start_time",
"required": false,
"type": "string"
},
{
"name": "sub_domain",
"required": false,
"type": "string"
},
{
"name": "type",
"required": false,
"type": "string"
}
]
},
"responses": {
"count_of_notifications": {
"properties": [
"response"
],
"type": "object"
},
"get_notifications": {
"properties": [
"instanceId",
"eventId",
"name",
"namespace",
"description",
"type",
"category",
"severity",
"timestamp",
"domain",
"subDomain",
"source",
"context",
"details",
"tenantId"
],
"type": "object"
}
}
}"""
)
| 25.275449
| 66
| 0.298271
| 212
| 4,221
| 5.792453
| 0.273585
| 0.232899
| 0.30456
| 0.374593
| 0.610749
| 0.566775
| 0.566775
| 0.566775
| 0.566775
| 0.566775
| 0
| 0
| 0.568586
| 4,221
| 166
| 67
| 25.427711
| 0.674355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0.2
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b0644d47e9f77e63d0aabab36336efa061157307
| 83
|
py
|
Python
|
src/FacebookMessengerAnalyzer/__init__.py
|
zegarmm001/FacebookMessengerAnalyzer
|
1383d8f618eacab8b5171311f43305537e336c80
|
[
"MIT"
] | 1
|
2021-03-22T13:09:23.000Z
|
2021-03-22T13:09:23.000Z
|
src/FacebookMessengerAnalyzer/__init__.py
|
zegarmm001/FacebookMessengerAnalyzer
|
1383d8f618eacab8b5171311f43305537e336c80
|
[
"MIT"
] | 1
|
2021-03-23T21:23:06.000Z
|
2021-03-23T21:23:06.000Z
|
src/FacebookMessengerAnalyzer/__init__.py
|
zegarmm001/FacebookMessengerAnalyzer
|
1383d8f618eacab8b5171311f43305537e336c80
|
[
"MIT"
] | null | null | null |
from FacebookMessengerAnalyzer.FacebookMessengerAnalyzer import IndividualMesseges
| 83
| 83
| 0.939759
| 5
| 83
| 15.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048193
| 83
| 1
| 83
| 83
| 0.987342
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b070eabe82fdfa7cf4e32d3196aebce3125b1d9f
| 176
|
py
|
Python
|
deep_sentinel/models/dnn/dataset/__init__.py
|
msakai/DeepSentinel
|
b090a74a54b4f162ce6f078b57976353dc276dec
|
[
"MIT"
] | 7
|
2018-07-17T05:29:30.000Z
|
2021-03-18T18:35:50.000Z
|
deep_sentinel/models/dnn/dataset/__init__.py
|
msakai/DeepSentinel
|
b090a74a54b4f162ce6f078b57976353dc276dec
|
[
"MIT"
] | null | null | null |
deep_sentinel/models/dnn/dataset/__init__.py
|
msakai/DeepSentinel
|
b090a74a54b4f162ce6f078b57976353dc276dec
|
[
"MIT"
] | 2
|
2018-07-17T12:50:22.000Z
|
2020-03-23T05:00:40.000Z
|
from .constants import CURRENT_DISCRETE, CURRENT_CONTINUOUS, NEXT_DISCRETE, NEXT_CONTINUOUS
from .create import create_dataset, split_dataset
from .extract import extract_from
| 44
| 91
| 0.869318
| 23
| 176
| 6.347826
| 0.478261
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 176
| 3
| 92
| 58.666667
| 0.9125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.