hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
2dfe0b429335644a54d1ebf633cee6f7f40b0692
210
py
Python
dagu/dagu_convert.py
patrickdehoon/dagu
71b6e5168afcdb1a9cdf0b5d6dd446a728c784e1
[ "MIT" ]
null
null
null
dagu/dagu_convert.py
patrickdehoon/dagu
71b6e5168afcdb1a9cdf0b5d6dd446a728c784e1
[ "MIT" ]
null
null
null
dagu/dagu_convert.py
patrickdehoon/dagu
71b6e5168afcdb1a9cdf0b5d6dd446a728c784e1
[ "MIT" ]
null
null
null
from datetime import datetime class DaguConvert: @staticmethod def execute(record, target_format): converted_record = datetime.strftime(record, target_format) return converted_record
21
67
0.742857
22
210
6.909091
0.636364
0.157895
0.236842
0
0
0
0
0
0
0
0
0
0.204762
210
9
68
23.333333
0.91018
0
0
0
0
0
0
0
0
0
0
0
0
1
0.166667
false
0
0.166667
0
0.666667
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
931277177cad13503ce28cb76c53e48e3e8e8f44
204
py
Python
rqw-settings.py
amywieliczka/harvester
ed21e0167ac5e6e6002fa7c89aa78cc1e93e29d8
[ "BSD-3-Clause" ]
5
2015-01-14T20:48:28.000Z
2015-05-13T15:31:12.000Z
rqw-settings.py
amywieliczka/harvester
ed21e0167ac5e6e6002fa7c89aa78cc1e93e29d8
[ "BSD-3-Clause" ]
87
2015-01-09T00:17:44.000Z
2021-12-13T19:37:44.000Z
rqw-settings.py
amywieliczka/harvester
ed21e0167ac5e6e6002fa7c89aa78cc1e93e29d8
[ "BSD-3-Clause" ]
4
2015-02-26T23:27:44.000Z
2019-06-11T21:43:17.000Z
REDIS_HOST="{{ redis_host }}" REDIS_PORT={{ redis_port }} REDIS_CONNECT_TIMEOUT={{ redis_connect_timeout }} QUEUES= [ 'high{{ name_suffix }}', 'normal{{ name_suffix }}', 'low{{ name_suffix }}']
34
61
0.656863
24
204
5.125
0.458333
0.243902
0.227642
0
0
0
0
0
0
0
0
0
0.147059
204
5
62
40.8
0.706897
0
0
0
0
0
0.392157
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
9327c7d9861e811692ede887d45d0fed725f2f36
92
py
Python
app/auth/signals.py
sololuz/cibb-web
665de9832e8a262f9051f4075572f5aed0553f6e
[ "BSD-3-Clause" ]
null
null
null
app/auth/signals.py
sololuz/cibb-web
665de9832e8a262f9051f4075572f5aed0553f6e
[ "BSD-3-Clause" ]
null
null
null
app/auth/signals.py
sololuz/cibb-web
665de9832e8a262f9051f4075572f5aed0553f6e
[ "BSD-3-Clause" ]
null
null
null
import django.dispatch user_registered = django.dispatch.Signal(providing_args=["user"])
15.333333
65
0.793478
11
92
6.454545
0.727273
0.394366
0
0
0
0
0
0
0
0
0
0
0.086957
92
5
66
18.4
0.845238
0
0
0
0
0
0.043956
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
9345c793ea8467850284372100fd002bf671beaf
331
py
Python
scraper/__init__.py
puchchi/stock_scraper_latest
09abfa12edbec0d6a65915db37ad3ad1b25fa092
[ "MIT" ]
null
null
null
scraper/__init__.py
puchchi/stock_scraper_latest
09abfa12edbec0d6a65915db37ad3ad1b25fa092
[ "MIT" ]
null
null
null
scraper/__init__.py
puchchi/stock_scraper_latest
09abfa12edbec0d6a65915db37ad3ad1b25fa092
[ "MIT" ]
null
null
null
from utility import * from items import * import logging #This will help in logging all spider logging.basicConfig(filename='C:\\Users\\anursin\\Documents\\GitHub/stock_scraper/stock_scraper.log',filemode='a',format='%(asctime)-15s : %(module)s : %(levelname)s : %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)
47.285714
234
0.722054
50
331
4.74
0.76
0.101266
0
0
0
0
0
0
0
0
0
0.006623
0.087613
331
7
234
47.285714
0.778146
0.108761
0
0
0
0
0.498305
0.233898
0
0
0
0
0
1
0
true
0
0.75
0
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
93537377b2c8be53710b71f9aca4b154911cdfb0
34
py
Python
everyvoter_common/tests/__init__.py
everyvoter/everyvoter
65d9b8bdf9b5c64057135c279f6e03b6c207e0fa
[ "MIT" ]
5
2019-07-01T17:50:44.000Z
2022-02-20T02:44:42.000Z
everyvoter_common/tests/__init__.py
everyvoter/everyvoter
65d9b8bdf9b5c64057135c279f6e03b6c207e0fa
[ "MIT" ]
3
2020-06-05T21:44:33.000Z
2021-06-10T21:39:26.000Z
everyvoter_common/tests/__init__.py
everyvoter/everyvoter
65d9b8bdf9b5c64057135c279f6e03b6c207e0fa
[ "MIT" ]
1
2021-12-09T06:32:40.000Z
2021-12-09T06:32:40.000Z
"""Tests for EveryVoter Common"""
17
33
0.705882
4
34
6
1
0
0
0
0
0
0
0
0
0
0
0
0.117647
34
1
34
34
0.8
0.794118
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
936a753a2796896667aa782277be41b40af061d3
130
py
Python
crabageprediction/venv/Lib/site-packages/numpy/f2py/__main__.py
13rianlucero/CrabAgePrediction
92bc7fbe1040f49e820473e33cc3902a5a7177c7
[ "MIT" ]
20,453
2015-01-02T09:00:47.000Z
2022-03-31T23:35:56.000Z
crabageprediction/venv/Lib/site-packages/numpy/f2py/__main__.py
13rianlucero/CrabAgePrediction
92bc7fbe1040f49e820473e33cc3902a5a7177c7
[ "MIT" ]
14,862
2015-01-01T01:28:34.000Z
2022-03-31T23:48:52.000Z
crabageprediction/venv/Lib/site-packages/numpy/f2py/__main__.py
13rianlucero/CrabAgePrediction
92bc7fbe1040f49e820473e33cc3902a5a7177c7
[ "MIT" ]
9,362
2015-01-01T15:49:43.000Z
2022-03-31T21:26:51.000Z
# See: # https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e from numpy.f2py.f2py2e import main main()
21.666667
79
0.753846
20
130
4.9
0.85
0
0
0
0
0
0
0
0
0
0
0.158333
0.076923
130
5
80
26
0.658333
0.630769
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
9370550c637fb1198c44b75d3295338cdaee61a8
216
py
Python
src/main/python/algorithms/wiggleSort.py
asifkamalturzo/visualizer_integration
20f0f83bff3bba0f5cf52061f65aef33ada46a89
[ "MIT" ]
null
null
null
src/main/python/algorithms/wiggleSort.py
asifkamalturzo/visualizer_integration
20f0f83bff3bba0f5cf52061f65aef33ada46a89
[ "MIT" ]
18
2021-10-01T14:27:14.000Z
2021-10-01T19:30:58.000Z
src/main/python/algorithms/wiggleSort.py
asifkamalturzo/visualizer_integration
20f0f83bff3bba0f5cf52061f65aef33ada46a89
[ "MIT" ]
1
2021-11-06T19:47:14.000Z
2021-11-06T19:47:14.000Z
def wiggleSort(array, *args): for i in range(len(array)): if (i % 2 == 1) == (array[i - 1] > array[i]): array[i - 1], array[i] = array[i], array[i - 1] yield array, i, -1 , i+1, -1
43.2
59
0.462963
36
216
2.777778
0.361111
0.42
0.28
0.36
0.39
0.32
0.32
0
0
0
0
0.054795
0.324074
216
5
60
43.2
0.630137
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0
0
0.2
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
fa7338386a506c154acf4ea0d70bcd3bddd69f5d
126
py
Python
backlogr/react/apps.py
frnkn/backlogr_py
fa72d4bf84431d2c120199c7d2a37643e8a8bd98
[ "Unlicense" ]
null
null
null
backlogr/react/apps.py
frnkn/backlogr_py
fa72d4bf84431d2c120199c7d2a37643e8a8bd98
[ "Unlicense" ]
null
null
null
backlogr/react/apps.py
frnkn/backlogr_py
fa72d4bf84431d2c120199c7d2a37643e8a8bd98
[ "Unlicense" ]
null
null
null
from __future__ import unicode_literals from django.apps import AppConfig class ReactConfig(AppConfig): name = 'react'
15.75
39
0.785714
15
126
6.266667
0.8
0
0
0
0
0
0
0
0
0
0
0
0.15873
126
7
40
18
0.886792
0
0
0
0
0
0.039683
0
0
0
0
0
0
1
0
false
0
0.5
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
fa841a8f30c242b0844d5c45a514ece03b3bab50
33
py
Python
src/__init__.py
KILFaT/solversode
47dfbd2930dfc3d2f38aebbc040ae03712febd72
[ "Apache-2.0" ]
null
null
null
src/__init__.py
KILFaT/solversode
47dfbd2930dfc3d2f38aebbc040ae03712febd72
[ "Apache-2.0" ]
null
null
null
src/__init__.py
KILFaT/solversode
47dfbd2930dfc3d2f38aebbc040ae03712febd72
[ "Apache-2.0" ]
null
null
null
from src import module CONST1=42
11
22
0.818182
6
33
4.5
1
0
0
0
0
0
0
0
0
0
0
0.107143
0.151515
33
3
23
11
0.857143
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
faaf0e65bb18129a9b7def4f2d8bc09b2f7667aa
175
py
Python
pyth_ans_ecourse/class9/exercise8/mytest/whatever.py
fallenfuzz/pynet
9624d83cca160fd325a34e838e4474c9b80fe2ab
[ "Apache-2.0" ]
528
2015-01-07T15:28:51.000Z
2022-03-27T09:45:37.000Z
pyth_ans_ecourse/class9/exercise8/mytest/whatever.py
fallenfuzz/pynet
9624d83cca160fd325a34e838e4474c9b80fe2ab
[ "Apache-2.0" ]
19
2015-07-01T23:52:27.000Z
2021-09-22T04:30:34.000Z
pyth_ans_ecourse/class9/exercise8/mytest/whatever.py
fallenfuzz/pynet
9624d83cca160fd325a34e838e4474c9b80fe2ab
[ "Apache-2.0" ]
555
2015-01-18T07:21:43.000Z
2022-03-20T21:25:22.000Z
''' Python class on writing reusable code ''' def func3(): '''Simple test function''' print "Whatever" if __name__ == "__main__": print "Main program - whatever"
17.5
37
0.645714
20
175
5.25
0.85
0
0
0
0
0
0
0
0
0
0
0.007246
0.211429
175
9
38
19.444444
0.753623
0
0
0
0
0
0.375
0
0
0
0
0
0
0
null
null
0
0
null
null
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
4
faaf2ca989a68635d615b52bbff8f5620eb7c3d3
331
py
Python
api/routes.py
amjed-ali-k/polmo-fastapi-backend
64e64da1e018773970ec9dccf34501079c072d99
[ "Apache-2.0" ]
3
2021-08-04T08:43:35.000Z
2022-03-07T10:15:35.000Z
api/routes.py
amjed-ali-k/polmo-fastapi-backend
64e64da1e018773970ec9dccf34501079c072d99
[ "Apache-2.0" ]
null
null
null
api/routes.py
amjed-ali-k/polmo-fastapi-backend
64e64da1e018773970ec9dccf34501079c072d99
[ "Apache-2.0" ]
1
2021-08-01T05:47:21.000Z
2021-08-01T05:47:21.000Z
import fastapi from fastapi import responses router = fastapi.APIRouter() @router.get('/', include_in_schema=False) def index(): return responses.RedirectResponse(url='/redoc') @router.get('/favicon.ico', include_in_schema=False) def favicon(): return fastapi.responses.RedirectResponse(url='/static/icons/favicon.ico')
25.461538
78
0.761329
41
331
6.04878
0.512195
0.072581
0.120968
0.16129
0.185484
0
0
0
0
0
0
0
0.096677
331
13
78
25.461538
0.829431
0
0
0
0
0
0.13253
0.075301
0
0
0
0
0
1
0.222222
false
0
0.222222
0.222222
0.666667
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
fab8c457ff9cfd23c3110eccbdfa55a6011aa972
129
py
Python
solutions/correlation_matrix.py
kant/LearnAI-ADPM
7cbfb4fed04056f4c89b2d0beebb2bfa78cc0931
[ "CC-BY-4.0", "MIT" ]
null
null
null
solutions/correlation_matrix.py
kant/LearnAI-ADPM
7cbfb4fed04056f4c89b2d0beebb2bfa78cc0931
[ "CC-BY-4.0", "MIT" ]
null
null
null
solutions/correlation_matrix.py
kant/LearnAI-ADPM
7cbfb4fed04056f4c89b2d0beebb2bfa78cc0931
[ "CC-BY-4.0", "MIT" ]
null
null
null
corr = df_all.corr() sns.heatmap(corr, xticklabels=corr.columns.values, yticklabels=corr.columns.values)
25.8
44
0.651163
15
129
5.533333
0.6
0.26506
0.409639
0
0
0
0
0
0
0
0
0
0.232558
129
4
45
32.25
0.838384
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
fac1be9d2801440b4f592a5274d99cbe7a28eeab
192
py
Python
api/v1/serializers/provider_type_serializer.py
xuhang57/atmosphere
f53fea2a74ee89ccc8852906799b1d9a7e9178b7
[ "BSD-3-Clause" ]
null
null
null
api/v1/serializers/provider_type_serializer.py
xuhang57/atmosphere
f53fea2a74ee89ccc8852906799b1d9a7e9178b7
[ "BSD-3-Clause" ]
null
null
null
api/v1/serializers/provider_type_serializer.py
xuhang57/atmosphere
f53fea2a74ee89ccc8852906799b1d9a7e9178b7
[ "BSD-3-Clause" ]
null
null
null
from core.models.provider import ProviderType from rest_framework import serializers class ProviderTypeSerializer(serializers.ModelSerializer): class Meta: model = ProviderType
21.333333
58
0.802083
19
192
8.052632
0.736842
0
0
0
0
0
0
0
0
0
0
0
0.15625
192
8
59
24
0.944444
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.4
0
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
fac43f8ea4d3186d899bd7f147e158087ff16bce
134
py
Python
lib/ga_net/layer_utils.py
markstrefford/GA_CNN
fe5d71c16a2a413a97c69cff94d3e0c0cc69e9cc
[ "MIT" ]
null
null
null
lib/ga_net/layer_utils.py
markstrefford/GA_CNN
fe5d71c16a2a413a97c69cff94d3e0c0cc69e9cc
[ "MIT" ]
null
null
null
lib/ga_net/layer_utils.py
markstrefford/GA_CNN
fe5d71c16a2a413a97c69cff94d3e0c0cc69e9cc
[ "MIT" ]
null
null
null
# # layer_utils.py # # Utility functions for generating a NN using GA # # Written by Mark Strefford # (c) 2021 Delirium Digital Ltd #
14.888889
48
0.723881
20
134
4.8
1
0
0
0
0
0
0
0
0
0
0
0.037037
0.19403
134
8
49
16.75
0.851852
0.873134
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
87b55aebbb906dfc5ba15c69828c537c14352bd4
17
py
Python
T32-05/program.py
miguelgaoreiss/SSof-Project1920
0bf74c264e06966931d6a2e0b42134dfddc32eb4
[ "MIT" ]
2
2019-11-20T19:26:07.000Z
2019-11-22T00:42:23.000Z
T32-05/program.py
miguelgaoreiss/SSof-Project1920
0bf74c264e06966931d6a2e0b42134dfddc32eb4
[ "MIT" ]
2
2019-11-28T05:21:24.000Z
2019-11-28T05:21:58.000Z
T32-05/program.py
miguelgaoreiss/SSof-Project1920
0bf74c264e06966931d6a2e0b42134dfddc32eb4
[ "MIT" ]
25
2019-11-27T01:40:56.000Z
2019-12-04T23:38:59.000Z
snk(san(src(x)))
8.5
16
0.588235
4
17
2.5
1
0
0
0
0
0
0
0
0
0
0
0
0.058824
17
1
17
17
0.625
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
87f80ab122296a9108c7ac60b8066e6128c5d347
56
py
Python
B_Python_and_friends/solutions/ex1_2.py
oercompbiomed/CBM101
20010dcb99fbf218c4789eb5918dcff8ceb94898
[ "MIT" ]
7
2019-07-03T07:41:55.000Z
2022-02-06T20:25:37.000Z
beginners-guide/solutions/ex1_2.py
oercompbiomed/Seili-2020
1f7490d58759098de28494f4580af3c700f57bd4
[ "MIT" ]
9
2019-03-14T15:15:09.000Z
2019-08-01T14:18:21.000Z
B_Python_and_friends/solutions/ex1_2.py
oercompbiomed/CBM101
20010dcb99fbf218c4789eb5918dcff8ceb94898
[ "MIT" ]
11
2019-03-12T10:43:11.000Z
2021-10-05T12:15:00.000Z
ln = len(long_list) index = int(ln/2) long_list[:index]
14
19
0.696429
11
56
3.363636
0.636364
0.432432
0.702703
0
0
0
0
0
0
0
0
0.020408
0.125
56
4
20
14
0.734694
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
87ffa34692350acd5d1c315bc181b5810b0f5dc0
113
py
Python
Python/Console/RawInput.py
piovezan/SOpt
a5ec90796b7bdf98f0675457fc4bb99c8695bc40
[ "MIT" ]
148
2017-08-03T01:49:27.000Z
2022-03-26T10:39:30.000Z
Python/Console/RawInput.py
piovezan/SOpt
a5ec90796b7bdf98f0675457fc4bb99c8695bc40
[ "MIT" ]
3
2017-11-23T19:52:05.000Z
2020-04-01T00:44:40.000Z
Python/Console/RawInput.py
piovezan/SOpt
a5ec90796b7bdf98f0675457fc4bb99c8695bc40
[ "MIT" ]
59
2017-08-03T01:49:19.000Z
2022-03-31T23:24:38.000Z
name = raw_input("tell me your name:") print 'Hello', str(name), '!' #https://pt.stackoverflow.com/q/204509/101
22.6
42
0.681416
18
113
4.222222
0.888889
0
0
0
0
0
0
0
0
0
0
0.089109
0.106195
113
4
43
28.25
0.663366
0.362832
0
0
0
0
0.338028
0
0
0
0
0
0
0
null
null
0
0
null
null
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
4
354c8e04625c33f3879a94969ee3444d0288555d
33,748
py
Python
tccli/services/cmq/cmq_client.py
bopopescu/tencentcloud-cli-intl-en
e6317252557095dd10018226244e636daa4a3c67
[ "Apache-2.0" ]
null
null
null
tccli/services/cmq/cmq_client.py
bopopescu/tencentcloud-cli-intl-en
e6317252557095dd10018226244e636daa4a3c67
[ "Apache-2.0" ]
null
null
null
tccli/services/cmq/cmq_client.py
bopopescu/tencentcloud-cli-intl-en
e6317252557095dd10018226244e636daa4a3c67
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- import os import json import tccli.options_define as OptionsDefine import tccli.format_output as FormatOutput from tccli.nice_command import NiceCommand import tccli.error_msg as ErrorMsg import tccli.help_template as HelpTemplate from tccli import __version__ from tccli.utils import Utils from tccli.configure import Configure from tencentcloud.common import credential from tencentcloud.common.profile.http_profile import HttpProfile from tencentcloud.common.profile.client_profile import ClientProfile from tencentcloud.cmq.v20190304 import cmq_client as cmq_client_v20190304 from tencentcloud.cmq.v20190304 import models as models_v20190304 from tccli.services.cmq import v20190304 from tccli.services.cmq.v20190304 import help as v20190304_help def doCreateTopic(argv, arglist): g_param = parse_global_arg(argv) if "help" in argv: show_help("CreateTopic", g_param[OptionsDefine.Version]) return param = { "TopicName": argv.get("--TopicName"), "MaxMsgSize": Utils.try_to_json(argv, "--MaxMsgSize"), "FilterType": Utils.try_to_json(argv, "--FilterType"), "MsgRetentionSeconds": Utils.try_to_json(argv, "--MsgRetentionSeconds"), "Trace": Utils.try_to_json(argv, "--Trace"), } cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey]) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.CmqClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.CreateTopicRequest() model.from_json_string(json.dumps(param)) rsp = client.CreateTopic(model) result = rsp.to_json_string() jsonobj = None try: jsonobj = json.loads(result) except TypeError as e: jsonobj = 
json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doCreateSubscribe(argv, arglist): g_param = parse_global_arg(argv) if "help" in argv: show_help("CreateSubscribe", g_param[OptionsDefine.Version]) return param = { "TopicName": argv.get("--TopicName"), "SubscriptionName": argv.get("--SubscriptionName"), "Protocol": argv.get("--Protocol"), "Endpoint": argv.get("--Endpoint"), "NotifyStrategy": argv.get("--NotifyStrategy"), "FilterTag": Utils.try_to_json(argv, "--FilterTag"), "BindingKey": Utils.try_to_json(argv, "--BindingKey"), "NotifyContentFormat": argv.get("--NotifyContentFormat"), } cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey]) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.CmqClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.CreateSubscribeRequest() model.from_json_string(json.dumps(param)) rsp = client.CreateSubscribe(model) result = rsp.to_json_string() jsonobj = None try: jsonobj = json.loads(result) except TypeError as e: jsonobj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doModifyTopicAttribute(argv, arglist): g_param = parse_global_arg(argv) if "help" in argv: show_help("ModifyTopicAttribute", g_param[OptionsDefine.Version]) return param = { "TopicName": argv.get("--TopicName"), "MaxMsgSize": Utils.try_to_json(argv, "--MaxMsgSize"), "MsgRetentionSeconds": Utils.try_to_json(argv, "--MsgRetentionSeconds"), "Trace": 
Utils.try_to_json(argv, "--Trace"), } cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey]) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.CmqClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.ModifyTopicAttributeRequest() model.from_json_string(json.dumps(param)) rsp = client.ModifyTopicAttribute(model) result = rsp.to_json_string() jsonobj = None try: jsonobj = json.loads(result) except TypeError as e: jsonobj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doClearSubscriptionFilterTags(argv, arglist): g_param = parse_global_arg(argv) if "help" in argv: show_help("ClearSubscriptionFilterTags", g_param[OptionsDefine.Version]) return param = { "TopicName": argv.get("--TopicName"), "SubscriptionName": argv.get("--SubscriptionName"), } cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey]) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.CmqClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.ClearSubscriptionFilterTagsRequest() model.from_json_string(json.dumps(param)) rsp = client.ClearSubscriptionFilterTags(model) 
result = rsp.to_json_string() jsonobj = None try: jsonobj = json.loads(result) except TypeError as e: jsonobj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doDeleteSubscribe(argv, arglist): g_param = parse_global_arg(argv) if "help" in argv: show_help("DeleteSubscribe", g_param[OptionsDefine.Version]) return param = { "TopicName": argv.get("--TopicName"), "SubscriptionName": argv.get("--SubscriptionName"), } cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey]) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.CmqClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.DeleteSubscribeRequest() model.from_json_string(json.dumps(param)) rsp = client.DeleteSubscribe(model) result = rsp.to_json_string() jsonobj = None try: jsonobj = json.loads(result) except TypeError as e: jsonobj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doCreateQueue(argv, arglist): g_param = parse_global_arg(argv) if "help" in argv: show_help("CreateQueue", g_param[OptionsDefine.Version]) return param = { "QueueName": argv.get("--QueueName"), "MaxMsgHeapNum": Utils.try_to_json(argv, "--MaxMsgHeapNum"), "PollingWaitSeconds": Utils.try_to_json(argv, "--PollingWaitSeconds"), "VisibilityTimeout": Utils.try_to_json(argv, "--VisibilityTimeout"), "MaxMsgSize": Utils.try_to_json(argv, "--MaxMsgSize"), "MsgRetentionSeconds": Utils.try_to_json(argv, "--MsgRetentionSeconds"), 
"RewindSeconds": Utils.try_to_json(argv, "--RewindSeconds"), "Transaction": Utils.try_to_json(argv, "--Transaction"), "FirstQueryInterval": Utils.try_to_json(argv, "--FirstQueryInterval"), "MaxQueryCount": Utils.try_to_json(argv, "--MaxQueryCount"), "DeadLetterQueueName": argv.get("--DeadLetterQueueName"), "Policy": Utils.try_to_json(argv, "--Policy"), "MaxReceiveCount": Utils.try_to_json(argv, "--MaxReceiveCount"), "MaxTimeToLive": Utils.try_to_json(argv, "--MaxTimeToLive"), "Trace": Utils.try_to_json(argv, "--Trace"), } cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey]) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.CmqClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.CreateQueueRequest() model.from_json_string(json.dumps(param)) rsp = client.CreateQueue(model) result = rsp.to_json_string() jsonobj = None try: jsonobj = json.loads(result) except TypeError as e: jsonobj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doRewindQueue(argv, arglist): g_param = parse_global_arg(argv) if "help" in argv: show_help("RewindQueue", g_param[OptionsDefine.Version]) return param = { "QueueName": argv.get("--QueueName"), "StartConsumeTime": Utils.try_to_json(argv, "--StartConsumeTime"), } cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey]) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", 
endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.CmqClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.RewindQueueRequest() model.from_json_string(json.dumps(param)) rsp = client.RewindQueue(model) result = rsp.to_json_string() jsonobj = None try: jsonobj = json.loads(result) except TypeError as e: jsonobj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doModifySubscriptionAttribute(argv, arglist): g_param = parse_global_arg(argv) if "help" in argv: show_help("ModifySubscriptionAttribute", g_param[OptionsDefine.Version]) return param = { "TopicName": argv.get("--TopicName"), "SubscriptionName": argv.get("--SubscriptionName"), "NotifyStrategy": argv.get("--NotifyStrategy"), "NotifyContentFormat": argv.get("--NotifyContentFormat"), "FilterTags": Utils.try_to_json(argv, "--FilterTags"), "BindingKey": Utils.try_to_json(argv, "--BindingKey"), } cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey]) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.CmqClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.ModifySubscriptionAttributeRequest() model.from_json_string(json.dumps(param)) rsp = client.ModifySubscriptionAttribute(model) result = rsp.to_json_string() jsonobj = None try: jsonobj = json.loads(result) 
except TypeError as e: jsonobj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doDescribeTopicDetail(argv, arglist): g_param = parse_global_arg(argv) if "help" in argv: show_help("DescribeTopicDetail", g_param[OptionsDefine.Version]) return param = { "Offset": Utils.try_to_json(argv, "--Offset"), "Limit": Utils.try_to_json(argv, "--Limit"), "Filters": Utils.try_to_json(argv, "--Filters"), "TagKey": argv.get("--TagKey"), "TopicName": argv.get("--TopicName"), } cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey]) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.CmqClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.DescribeTopicDetailRequest() model.from_json_string(json.dumps(param)) rsp = client.DescribeTopicDetail(model) result = rsp.to_json_string() jsonobj = None try: jsonobj = json.loads(result) except TypeError as e: jsonobj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doDescribeQueueDetail(argv, arglist): g_param = parse_global_arg(argv) if "help" in argv: show_help("DescribeQueueDetail", g_param[OptionsDefine.Version]) return param = { "Offset": Utils.try_to_json(argv, "--Offset"), "Limit": Utils.try_to_json(argv, "--Limit"), "Filters": Utils.try_to_json(argv, "--Filters"), "TagKey": argv.get("--TagKey"), "QueueName": argv.get("--QueueName"), } cred = credential.Credential(g_param[OptionsDefine.SecretId], 
g_param[OptionsDefine.SecretKey]) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.CmqClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.DescribeQueueDetailRequest() model.from_json_string(json.dumps(param)) rsp = client.DescribeQueueDetail(model) result = rsp.to_json_string() jsonobj = None try: jsonobj = json.loads(result) except TypeError as e: jsonobj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doDeleteQueue(argv, arglist): g_param = parse_global_arg(argv) if "help" in argv: show_help("DeleteQueue", g_param[OptionsDefine.Version]) return param = { "QueueName": argv.get("--QueueName"), } cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey]) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.CmqClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.DeleteQueueRequest() model.from_json_string(json.dumps(param)) rsp = client.DeleteQueue(model) result = rsp.to_json_string() jsonobj = None try: jsonobj = json.loads(result) except TypeError as e: jsonobj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", jsonobj, 
g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doDescribeSubscriptionDetail(argv, arglist): g_param = parse_global_arg(argv) if "help" in argv: show_help("DescribeSubscriptionDetail", g_param[OptionsDefine.Version]) return param = { "TopicName": argv.get("--TopicName"), "Offset": Utils.try_to_json(argv, "--Offset"), "Limit": Utils.try_to_json(argv, "--Limit"), "Filters": Utils.try_to_json(argv, "--Filters"), } cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey]) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.CmqClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.DescribeSubscriptionDetailRequest() model.from_json_string(json.dumps(param)) rsp = client.DescribeSubscriptionDetail(model) result = rsp.to_json_string() jsonobj = None try: jsonobj = json.loads(result) except TypeError as e: jsonobj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doDescribeDeadLetterSourceQueues(argv, arglist): g_param = parse_global_arg(argv) if "help" in argv: show_help("DescribeDeadLetterSourceQueues", g_param[OptionsDefine.Version]) return param = { "DeadLetterQueueName": argv.get("--DeadLetterQueueName"), "Limit": Utils.try_to_json(argv, "--Limit"), "Offset": Utils.try_to_json(argv, "--Offset"), "Filters": Utils.try_to_json(argv, "--Filters"), } cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey]) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else 
int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.CmqClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.DescribeDeadLetterSourceQueuesRequest() model.from_json_string(json.dumps(param)) rsp = client.DescribeDeadLetterSourceQueues(model) result = rsp.to_json_string() jsonobj = None try: jsonobj = json.loads(result) except TypeError as e: jsonobj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doDeleteTopic(argv, arglist): g_param = parse_global_arg(argv) if "help" in argv: show_help("DeleteTopic", g_param[OptionsDefine.Version]) return param = { "TopicName": argv.get("--TopicName"), } cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey]) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.CmqClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.DeleteTopicRequest() model.from_json_string(json.dumps(param)) rsp = client.DeleteTopic(model) result = rsp.to_json_string() jsonobj = None try: jsonobj = json.loads(result) except TypeError as e: jsonobj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doClearQueue(argv, arglist): g_param = 
parse_global_arg(argv) if "help" in argv: show_help("ClearQueue", g_param[OptionsDefine.Version]) return param = { "QueueName": argv.get("--QueueName"), } cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey]) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.CmqClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.ClearQueueRequest() model.from_json_string(json.dumps(param)) rsp = client.ClearQueue(model) result = rsp.to_json_string() jsonobj = None try: jsonobj = json.loads(result) except TypeError as e: jsonobj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doUnbindDeadLetter(argv, arglist): g_param = parse_global_arg(argv) if "help" in argv: show_help("UnbindDeadLetter", g_param[OptionsDefine.Version]) return param = { "QueueName": argv.get("--QueueName"), } cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey]) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.CmqClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.UnbindDeadLetterRequest() model.from_json_string(json.dumps(param)) rsp = 
client.UnbindDeadLetter(model) result = rsp.to_json_string() jsonobj = None try: jsonobj = json.loads(result) except TypeError as e: jsonobj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) def doModifyQueueAttribute(argv, arglist): g_param = parse_global_arg(argv) if "help" in argv: show_help("ModifyQueueAttribute", g_param[OptionsDefine.Version]) return param = { "QueueName": argv.get("--QueueName"), "MaxMsgHeapNum": Utils.try_to_json(argv, "--MaxMsgHeapNum"), "PollingWaitSeconds": Utils.try_to_json(argv, "--PollingWaitSeconds"), "VisibilityTimeout": Utils.try_to_json(argv, "--VisibilityTimeout"), "MaxMsgSize": Utils.try_to_json(argv, "--MaxMsgSize"), "MsgRetentionSeconds": Utils.try_to_json(argv, "--MsgRetentionSeconds"), "RewindSeconds": Utils.try_to_json(argv, "--RewindSeconds"), "FirstQueryInterval": Utils.try_to_json(argv, "--FirstQueryInterval"), "MaxQueryCount": Utils.try_to_json(argv, "--MaxQueryCount"), "DeadLetterQueueName": argv.get("--DeadLetterQueueName"), "MaxTimeToLive": Utils.try_to_json(argv, "--MaxTimeToLive"), "MaxReceiveCount": Utils.try_to_json(argv, "--MaxReceiveCount"), "Policy": Utils.try_to_json(argv, "--Policy"), "Trace": Utils.try_to_json(argv, "--Trace"), } cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey]) http_profile = HttpProfile( reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]), reqMethod="POST", endpoint=g_param[OptionsDefine.Endpoint] ) profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256") mod = CLIENT_MAP[g_param[OptionsDefine.Version]] client = mod.CmqClient(cred, g_param[OptionsDefine.Region], profile) client._sdkVersion += ("_CLI_" + __version__) models = MODELS_MAP[g_param[OptionsDefine.Version]] model = models.ModifyQueueAttributeRequest() model.from_json_string(json.dumps(param)) rsp = 
client.ModifyQueueAttribute(model) result = rsp.to_json_string() jsonobj = None try: jsonobj = json.loads(result) except TypeError as e: jsonobj = json.loads(result.decode('utf-8')) # python3.3 FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter]) CLIENT_MAP = { "v20190304": cmq_client_v20190304, } MODELS_MAP = { "v20190304": models_v20190304, } ACTION_MAP = { "CreateTopic": doCreateTopic, "CreateSubscribe": doCreateSubscribe, "ModifyTopicAttribute": doModifyTopicAttribute, "ClearSubscriptionFilterTags": doClearSubscriptionFilterTags, "DeleteSubscribe": doDeleteSubscribe, "CreateQueue": doCreateQueue, "RewindQueue": doRewindQueue, "ModifySubscriptionAttribute": doModifySubscriptionAttribute, "DescribeTopicDetail": doDescribeTopicDetail, "DescribeQueueDetail": doDescribeQueueDetail, "DeleteQueue": doDeleteQueue, "DescribeSubscriptionDetail": doDescribeSubscriptionDetail, "DescribeDeadLetterSourceQueues": doDescribeDeadLetterSourceQueues, "DeleteTopic": doDeleteTopic, "ClearQueue": doClearQueue, "UnbindDeadLetter": doUnbindDeadLetter, "ModifyQueueAttribute": doModifyQueueAttribute, } AVAILABLE_VERSION_LIST = [ v20190304.version, ] AVAILABLE_VERSIONS = { 'v' + v20190304.version.replace('-', ''): {"help": v20190304_help.INFO,"desc": v20190304_help.DESC}, } def cmq_action(argv, arglist): if "help" in argv: versions = sorted(AVAILABLE_VERSIONS.keys()) opt_v = "--" + OptionsDefine.Version version = versions[-1] if opt_v in argv: version = 'v' + argv[opt_v].replace('-', '') if version not in versions: print("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST)) return action_str = "" docs = AVAILABLE_VERSIONS[version]["help"] desc = AVAILABLE_VERSIONS[version]["desc"] for action, info in docs.items(): action_str += " %s\n" % action action_str += Utils.split_str(" ", info["desc"], 120) helpstr = HelpTemplate.SERVICE % {"name": "cmq", "desc": desc, "actions": action_str} print(helpstr) else: print(ErrorMsg.FEW_ARG) def 
version_merge(): help_merge = {} for v in AVAILABLE_VERSIONS: for action in AVAILABLE_VERSIONS[v]["help"]: if action not in help_merge: help_merge[action] = {} help_merge[action]["cb"] = ACTION_MAP[action] help_merge[action]["params"] = [] for param in AVAILABLE_VERSIONS[v]["help"][action]["params"]: if param["name"] not in help_merge[action]["params"]: help_merge[action]["params"].append(param["name"]) return help_merge def register_arg(command): cmd = NiceCommand("cmq", cmq_action) command.reg_cmd(cmd) cmd.reg_opt("help", "bool") cmd.reg_opt(OptionsDefine.Version, "string") help_merge = version_merge() for actionName, action in help_merge.items(): c = NiceCommand(actionName, action["cb"]) cmd.reg_cmd(c) c.reg_opt("help", "bool") for param in action["params"]: c.reg_opt("--" + param, "string") for opt in OptionsDefine.ACTION_GLOBAL_OPT: stropt = "--" + opt c.reg_opt(stropt, "string") def parse_global_arg(argv): params = {} for opt in OptionsDefine.ACTION_GLOBAL_OPT: stropt = "--" + opt if stropt in argv: params[opt] = argv[stropt] else: params[opt] = None if params[OptionsDefine.Version]: params[OptionsDefine.Version] = "v" + params[OptionsDefine.Version].replace('-', '') config_handle = Configure() profile = config_handle.profile if ("--" + OptionsDefine.Profile) in argv: profile = argv[("--" + OptionsDefine.Profile)] is_conexist, conf_path = config_handle._profile_existed(profile + "." + config_handle.configure) is_creexist, cred_path = config_handle._profile_existed(profile + "." 
+ config_handle.credential) config = {} cred = {} if is_conexist: config = config_handle._load_json_msg(conf_path) if is_creexist: cred = config_handle._load_json_msg(cred_path) if os.environ.get(OptionsDefine.ENV_SECRET_ID): cred[OptionsDefine.SecretId] = os.environ.get(OptionsDefine.ENV_SECRET_ID) if os.environ.get(OptionsDefine.ENV_SECRET_KEY): cred[OptionsDefine.SecretKey] = os.environ.get(OptionsDefine.ENV_SECRET_KEY) if os.environ.get(OptionsDefine.ENV_REGION): config[OptionsDefine.Region] = os.environ.get(OptionsDefine.ENV_REGION) for param in params.keys(): if param == OptionsDefine.Version: continue if params[param] is None: if param in [OptionsDefine.SecretKey, OptionsDefine.SecretId]: if param in cred: params[param] = cred[param] else: raise Exception("%s is invalid" % param) else: if param in config: params[param] = config[param] elif param == OptionsDefine.Region: raise Exception("%s is invalid" % OptionsDefine.Region) try: if params[OptionsDefine.Version] is None: version = config["cmq"][OptionsDefine.Version] params[OptionsDefine.Version] = "v" + version.replace('-', '') if params[OptionsDefine.Endpoint] is None: params[OptionsDefine.Endpoint] = config["cmq"][OptionsDefine.Endpoint] except Exception as err: raise Exception("config file:%s error, %s" % (conf_path, str(err))) versions = sorted(AVAILABLE_VERSIONS.keys()) if params[OptionsDefine.Version] not in versions: raise Exception("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST)) return params def show_help(action, version): docs = AVAILABLE_VERSIONS[version]["help"][action] desc = AVAILABLE_VERSIONS[version]["desc"] docstr = "" for param in docs["params"]: docstr += " %s\n" % ("--" + param["name"]) docstr += Utils.split_str(" ", param["desc"], 120) helpmsg = HelpTemplate.ACTION % {"name": action, "service": "cmq", "desc": desc, "params": docstr} print(helpmsg) def get_actions_info(): config = Configure() new_version = max(AVAILABLE_VERSIONS.keys()) version = new_version try: profile = 
config._load_json_msg(os.path.join(config.cli_path, "default.configure")) version = profile["cmq"]["version"] version = "v" + version.replace('-', '') except Exception: pass if version not in AVAILABLE_VERSIONS.keys(): version = new_version return AVAILABLE_VERSIONS[version]["help"]
40.857143
105
0.688633
3,704
33,748
6.075324
0.063175
0.054393
0.15789
0.058925
0.765631
0.740923
0.728614
0.70515
0.696529
0.660801
0
0.009627
0.187419
33,748
825
106
40.906667
0.810962
0.00566
0
0.639615
0
0
0.104896
0.012195
0
0
0
0
0
1
0.031637
false
0.001376
0.023384
0
0.083906
0.005502
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
356247add8f9ef31658f6dc9b757e5bd99f9e50d
173
py
Python
Sergeant-RANK/PRACTICE/1016A.py
rohansaini886/Peer-Programming-Hub-CP-Winter_Camp
d27fb6aa7e726e6d2cb95270c9e644d38d64dd1c
[ "MIT" ]
2
2021-12-09T18:07:46.000Z
2022-01-26T16:51:18.000Z
Sergeant-RANK/PRACTICE/1016A.py
rohansaini886/Peer-Programming-Hub-CP-Winter_Camp
d27fb6aa7e726e6d2cb95270c9e644d38d64dd1c
[ "MIT" ]
null
null
null
Sergeant-RANK/PRACTICE/1016A.py
rohansaini886/Peer-Programming-Hub-CP-Winter_Camp
d27fb6aa7e726e6d2cb95270c9e644d38d64dd1c
[ "MIT" ]
null
null
null
# Codeforces 1016A ("Death Note"): each page holds m names; l[i] names are
# written on day i. For every day, print how many pages get completely
# filled (turned) that day.
#
# NOTE(review): the original file was whitespace-collapsed; the loop-body
# indentation below is reconstructed. Placing the print and the modulo
# inside the loop is the only reading in which `total = total % m` is not
# dead code (and matches the intended per-day output) — confirm upstream.
n, m = map(int, input().split(" "))
l = list(map(int, input().split(" ")))
total = 0  # names carried over on the current (partially filled) page
for i in l:
    total += i
    # Full pages accumulated so far are turned today.
    print(int(total // m), end = " ")
    # Keep only the remainder that stays on the open page.
    total = total % m
21.625
38
0.49711
28
173
3.071429
0.535714
0.139535
0.255814
0.372093
0
0
0
0
0
0
0
0.007874
0.265896
173
7
39
24.714286
0.669291
0
0
0
0
0
0.017341
0
0
0
0
0
0
1
0
false
0
0
0
0
0.142857
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
356b975c868ee9c2549353dd9b18c36064365907
57
py
Python
vae/kpmtest.py
Drishttii/pyprobml
30b120e7d4f81ade55c10250193d98398040574b
[ "MIT" ]
null
null
null
vae/kpmtest.py
Drishttii/pyprobml
30b120e7d4f81ade55c10250193d98398040574b
[ "MIT" ]
null
null
null
vae/kpmtest.py
Drishttii/pyprobml
30b120e7d4f81ade55c10250193d98398040574b
[ "MIT" ]
null
null
null
# Smoke test for the project-local utility module: import it, run its
# built-in self-test, then print a marker value showing the script
# reached the end without raising.
import scripts.pyprobml_utils as pml  # project-local; not on PyPI stdlib path

pml.test()  # module's own sanity check; raises on failure
print(42)   # success marker
19
36
0.807018
10
57
4.5
0.9
0
0
0
0
0
0
0
0
0
0
0.038462
0.087719
57
3
37
19
0.826923
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
35b1627066dadb7f93af2a5dc09b52c700759594
1,479
py
Python
tests/v2/test_logs_response_metadata.py
MichaelTROEHLER/datadog-api-client-python
12c46626622fb1277bb1e172753b342c671348bd
[ "Apache-2.0" ]
null
null
null
tests/v2/test_logs_response_metadata.py
MichaelTROEHLER/datadog-api-client-python
12c46626622fb1277bb1e172753b342c671348bd
[ "Apache-2.0" ]
null
null
null
tests/v2/test_logs_response_metadata.py
MichaelTROEHLER/datadog-api-client-python
12c46626622fb1277bb1e172753b342c671348bd
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2019-Present Datadog, Inc. from __future__ import absolute_import import sys import unittest import datadog_api_client.v2 try: from datadog_api_client.v2.model import logs_aggregate_response_status except ImportError: logs_aggregate_response_status = sys.modules[ 'datadog_api_client.v2.model.logs_aggregate_response_status'] try: from datadog_api_client.v2.model import logs_response_metadata_page except ImportError: logs_response_metadata_page = sys.modules[ 'datadog_api_client.v2.model.logs_response_metadata_page'] try: from datadog_api_client.v2.model import logs_warning except ImportError: logs_warning = sys.modules[ 'datadog_api_client.v2.model.logs_warning'] from datadog_api_client.v2.model.logs_response_metadata import LogsResponseMetadata class TestLogsResponseMetadata(unittest.TestCase): """LogsResponseMetadata unit test stubs""" def setUp(self): pass def tearDown(self): pass def testLogsResponseMetadata(self): """Test LogsResponseMetadata""" # FIXME: construct object with mandatory attributes with example values # model = LogsResponseMetadata() # noqa: E501 pass if __name__ == '__main__': unittest.main()
30.183673
108
0.762001
182
1,479
5.906593
0.43956
0.074419
0.11907
0.133953
0.273488
0.273488
0.269767
0.269767
0.111628
0
0
0.01461
0.167005
1,479
48
109
30.8125
0.857955
0.281947
0
0.310345
0
0
0.154067
0.146411
0
0
0
0.020833
0
1
0.103448
false
0.103448
0.37931
0
0.517241
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
1
1
0
1
0
0
4
35d384dcab4c010cd350e77e30e9279bbc2b1152
2,739
py
Python
effort/src/datasets/effort/Mystery1.py
rahlk/Bellwether
39e0e63504a6dfdeeb5d6e8d733e708d1485ecd9
[ "Unlicense" ]
9
2017-07-27T10:32:48.000Z
2021-07-01T11:51:51.000Z
effort/src/datasets/effort/Mystery1.py
rahlk/Bellwether
39e0e63504a6dfdeeb5d6e8d733e708d1485ecd9
[ "Unlicense" ]
11
2016-03-15T16:27:47.000Z
2019-09-05T02:25:08.000Z
effort/src/datasets/effort/Mystery1.py
rahlk/Bellwether
39e0e63504a6dfdeeb5d6e8d733e708d1485ecd9
[ "Unlicense" ]
5
2017-01-28T22:45:34.000Z
2019-12-04T13:15:10.000Z
""" # The JPL Data Set Mystery Dataset 1 Standard header: """ from __future__ import division,print_function import sys sys.dont_write_bytecode = True from lib import * """ Data: """ def run(weighFeature = False,split="median"): vl=1;l=2;n=3;h=4;vh=5;xh=6;_=0 return data(indep= [ # 0..8 'Prec','Flex','Resl','Team','Pmat','rely','cplx','data','ruse', # 9 .. 17 'time','stor','pvol','acap','pcap','pcon','aexp','plex','ltex', # 18 .. 25 'tool','sced','site','docu','kloc'], less = ['effort', 'xyz', 'abc'], _rows=[ [2, 2, 2, 3, 3, 4, 5, 4, 3, 5, 6, 4, 4, 4, 3, 4, 3, 3, 1, 3, 4, 4, 77, 1830, 77, 38.5], [2, 2, 2, 3, 3, 5, 5, 2, 3, 5, 6, 2, 4, 3, 3, 2, 1, 2, 2, 3, 4, 4, 23.8, 648, 23.8, 11.9], [2, 2, 2, 3, 3, 4, 5, 3, 3, 5, 5, 4, 3, 3, 3, 3, 2, 2, 1, 3, 4, 4, 22.5, 492, 22.5, 11.25], [2, 2, 3, 3, 2, 4, 4, 3, 2, 3, 3, 4, 3, 3, 3, 3, 3, 4, 2, 3, 5, 3, 146, 3291.8, 122, 61], [2, 3, 3, 5, 3, 3, 4, 3, 2, 4, 4, 2, 5, 5, 4, 5, 1, 5, 3, 3, 6, 3, 113.19, 1080, 113.19, 56.595], [3, 3, 3, 3, 3, 3, 4, 3, 2, 3, 3, 3, 3, 3, 3, 4, 3, 4, 2, 3, 4, 3, 184, 1042.8, 160, 80], [5, 3, 3, 3, 4, 4, 4, 3, 2, 3, 3, 2, 3, 3, 3, 5, 3, 4, 2, 3, 5, 3, 60.5, 336, 54, 27], [5, 3, 3, 4, 4, 4, 5, 3, 2, 3, 3, 2, 3, 3, 3, 5, 3, 4, 2, 3, 6, 3, 50, 637, 32.89, 16.445], [3, 3, 3, 2, 3, 4, 5, 3, 2, 3, 3, 3, 3, 3, 3, 4, 3, 4, 2, 3, 5, 3, 253, 2519, 188, 94], [3, 3, 4, 3, 3, 4, 4, 3, 4, 3, 3, 2, 3, 4, 3, 3, 1, 4, 5, 3, 2, 3, 158.75, 1047.9, 131, 65.5], [3, 3, 3, 3, 3, 4, 5, 3, 2, 3, 3, 4, 4, 4, 5, 4, 4, 4, 2, 1, 5, 3, 324, 1735.4, 245, 122.5], [3, 2, 4, 4, 3, 4, 5, 3, 4, 3, 4, 5, 4, 4, 3, 4, 4, 3, 4, 2, 6, 3, 224, 691, 153, 76.5], [5, 2, 2, 4, 3, 4, 3, 3, 4, 5, 4, 3, 4, 4, 3, 4, 4, 4, 3, 3, 3, 3, 104.6, 320, 48, 24], [3, 2, 2, 4, 3, 4, 3, 3, 3, 3, 3, 2, 4, 4, 3, 4, 4, 4, 3, 3, 3, 3, 173.4, 329, 98, 49], [3, 2, 4, 3, 3, 4, 5, 3, 4, 3, 3, 4, 3, 4, 4, 4, 3, 3, 3, 3, 5, 3, 597, 1705, 597, 298.5], [4, 2, 4, 3, 5, 4, 3, 2, 3, 3, 4, 4, 2, 2, 3, 3, 5, 5, 3, 3, 5, 3, 155, 789, 129, 64.5], [4, 3, 3, 3, 4, 4, 4, 3, 2, 3, 3, 
3, 4, 4, 3, 5, 4, 4, 2, 3, 5, 3, 170, 552, 100, 50] ], _tunings =[[ # vlow low nom high vhigh xhigh #scale factors: 'Prec', 6.20, 4.96, 3.72, 2.48, 1.24, _ ],[ 'Flex', 5.07, 4.05, 3.04, 2.03, 1.01, _ ],[ 'Resl', 7.07, 5.65, 4.24, 2.83, 1.41, _ ],[ 'Pmat', 7.80, 6.24, 4.68, 3.12, 1.56, _ ],[ 'Team', 5.48, 4.38, 3.29, 2.19, 1.01, _ ]], weighFeature = weighFeature, _split = split, _dataTypes = [int]*22 + [float]*4 ) """ Demo code: """ def _JPL(): print(JPL()) #if __name__ == '__main__': eval(todo('_nasa93()'))
42.138462
103
0.428258
634
2,739
1.807571
0.271293
0.132635
0.091623
0.066318
0.256545
0.212914
0.132635
0.09075
0.084642
0.076789
0
0.356037
0.292443
2,739
64
104
42.796875
0.235294
0.067178
0
0
0
0
0.051876
0
0
0
0
0.015625
0
1
0.04878
false
0
0.073171
0
0.146341
0.04878
0
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
4
ea2efbfb212e626bf0c9c5af731219d728e0f1a6
4,728
py
Python
test/domain/test_base.py
ip4-team/bedhandler
6415327fe6de1e7b2167ab29598927a71638deb3
[ "MIT" ]
1
2018-12-06T13:07:45.000Z
2018-12-06T13:07:45.000Z
test/domain/test_base.py
ip4-team/bedhandler
6415327fe6de1e7b2167ab29598927a71638deb3
[ "MIT" ]
5
2018-10-22T14:37:52.000Z
2020-09-24T21:01:05.000Z
test/domain/test_base.py
ip4-team/bedhandler
6415327fe6de1e7b2167ab29598927a71638deb3
[ "MIT" ]
null
null
null
# Unit tests for bedhandler.domain.BaseList / BaseMultList.
#
# The tests share two MODULE-LEVEL mutable fixtures; the autouse pytest
# fixture below clears both before every test, so each test starts from
# empty containers. Tests therefore must not rely on leftover state.
import pytest
from bedhandler.domain import BaseList, BaseMultList

# a list
base_list = BaseList()
# a list of lists
base_mult_list = BaseMultList()


@pytest.fixture(autouse=True)
def setup():
    # Reset the shared containers before each test (autouse: runs always).
    base_list.clear()
    base_mult_list.clear()


def test_base_list_str_single_number():
    """
    test baseList.__str__ with single element
    """
    base_list.append(1)
    assert str(base_list) == '1'


def test_mult_list_str_single_base_list():
    """
    test baseMultList.__str__ with single baseList
    """
    base_list.append(1)
    base_mult_list.append(base_list)
    assert str(base_mult_list) == '1'


def test_base_list_str_multiple_elements():
    """
    test baseList.__str__ with multiple elements
    """
    # Elements are comma-joined by BaseList.__str__.
    base_list.extend([1, 2])
    assert str(base_list) == '1,2'


def test_base_mult_list_str_multiple_base_list():
    """
    test baseList.__str__ with multiple baseLists
    """
    # Inner lists are '&'-joined by BaseMultList.__str__.
    base_list.append(1)  # [1]
    base_mult_list.extend([base_list, base_list])
    assert str(base_mult_list) == '1&1'


def test_base_mult_list_str_multiple_base_list_with_multiple_elements():
    """
    test baseMultList.__str__ with multiple baseLists with multiple elements
    """
    base_list.extend([1, 2])
    base_mult_list.extend([base_list, base_list])
    assert str(base_mult_list) == '1,2&1,2'


def test_base_mult_list_flattened_single_base_list_with_single_element():
    """
    test baseMultList.flattened with single baseList with single element
    """
    base_list.append(1)
    base_mult_list.append(base_list)
    assert base_mult_list.flattened() == [1]


def test_base_mult_list_flattened_single_base_list_with_multiple_elements():
    """
    test baseMultList.flattened with single baseList with multiple elements
    """
    base_list.extend([1, 2])
    base_mult_list.append(base_list)
    assert base_mult_list.flattened() == [1, 2]


def test_base_mult_list_flattened_multiple_base_list_without_element_repetitions():
    """
    test baseMultList.flattened with multiple baseList without element repetitions
    """
    # flattened() preserves insertion order across the inner lists.
    base_list.extend([1, 3])
    base_list_2 = BaseList()
    base_list_2.append(2)
    base_mult_list.extend([base_list, base_list_2])
    assert base_mult_list.flattened() == [1, 3, 2]


def test_base_mult_list_flattened_multiple_base_list_with_element_repetitions():
    """
    test baseMultList.flattened with multiple baseList with element repetitions
    """
    # flattened() keeps duplicates (contrast with unique_flattened below).
    base_list.extend([1, 2])
    base_mult_list.extend([base_list, base_list])
    assert base_mult_list.flattened() == [1, 2, 1, 2]


def test_base_mult_list_unique_flattened_base_list_with_single_element():
    """
    test baseMultList.unique_flattened with single baseList with single element
    """
    base_list.append(1)
    base_mult_list.append(base_list)
    assert base_mult_list.unique_flattened() == [1]


def test_base_mult_list_unique_flattened_single_base_list_with_multiple_elements():
    """
    test baseMultList.unique_flattened with single baseList with multiple elements
    """
    base_list.extend([1, 2])
    base_mult_list.append(base_list)
    assert base_mult_list.unique_flattened() == [1, 2]


def test_base_mult_list_unique_flattened_multiple_base_list_without_element_repetitions():
    """
    test baseMultList.unique_flattened with multiple base_list without element repetitions
    """
    # Result is sorted ([1, 2, 3]) rather than insertion-ordered ([1, 3, 2]).
    base_list.extend([1, 3])
    base_list_2 = BaseList()
    base_list_2.append(2)
    base_mult_list.extend([base_list, base_list_2])
    assert base_mult_list.unique_flattened() == [1, 2, 3]


def test_base_mult_list_unique_flattened_multiple_base_list_with_element_repetitions():
    """
    test baseMultList.unique_flattened with multiple baseList with element repetitions
    """
    base_list.extend([1, 2])
    base_mult_list.extend([base_list, base_list])
    assert base_mult_list.unique_flattened() == [1, 2]


def test_base_list_is_empty_without_elements():
    """
    test baseList.is_empty is True when there are not elements
    """
    assert base_list.is_empty() is True


def test_base_mult_list_is_empty_without_base_lists():
    """
    test baseMultList.is_empty is True when there are no baseLists
    """
    assert base_mult_list.is_empty() is True


def test_base_list_is_empty_containing_elements():
    """
    test baseList.is_empty is False when there are elements
    """
    assert BaseList([1]).is_empty() is False
    assert BaseList([1, 2]).is_empty() is False


def test_base_mult_list_is_empty_containing_base_lists():
    """
    test baseMultList.is_empty is False when there are baseLists
    """
    assert BaseMultList([BaseList([1])]).is_empty() is False
    assert BaseMultList([BaseList([1]), BaseList([2])]).is_empty() is False
28.829268
90
0.732022
666
4,728
4.779279
0.070571
0.138234
0.139491
0.05655
0.845743
0.798303
0.772856
0.647188
0.579642
0.461828
0
0.016273
0.168147
4,728
163
91
29.006135
0.793033
0.241117
0
0.416667
0
0
0.004505
0
0
0
0
0
0.263889
1
0.25
false
0
0.027778
0
0.277778
0
0
0
0
null
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
4
ea7d74d765f8358b624c2debece8d250c386c9dc
115
py
Python
exp/views/support.py
manybabies/MBAH-LookIt-API
18474ceac3dcc8365a5559cf84e9f460671993f5
[ "MIT" ]
9
2018-06-26T17:15:27.000Z
2021-11-21T17:19:01.000Z
exp/views/support.py
manybabies/MBAH-LookIt-API
18474ceac3dcc8365a5559cf84e9f460671993f5
[ "MIT" ]
496
2018-02-19T19:18:24.000Z
2022-03-31T17:01:16.000Z
exp/views/support.py
manybabies/MBAH-LookIt-API
18474ceac3dcc8365a5559cf84e9f460671993f5
[ "MIT" ]
16
2018-07-06T23:35:39.000Z
2021-11-21T17:52:58.000Z
from django.views import generic


class SupportView(generic.TemplateView):
    """Render the static support page; no extra context is added."""

    # Template rendered by TemplateView's default GET handling.
    template_name = "exp/support.html"
19.166667
40
0.782609
14
115
6.357143
0.928571
0
0
0
0
0
0
0
0
0
0
0
0.130435
115
5
41
23
0.89
0
0
0
0
0
0.13913
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
ea80f4195f2b8c847b12250cbc3d3572d696515f
1,201
py
Python
week1 and extra/copypasta.py
harshonyou/SOFT1
1bd2b0cc26d39c549bec576389bebd0fd011387d
[ "Apache-2.0" ]
null
null
null
week1 and extra/copypasta.py
harshonyou/SOFT1
1bd2b0cc26d39c549bec576389bebd0fd011387d
[ "Apache-2.0" ]
null
null
null
week1 and extra/copypasta.py
harshonyou/SOFT1
1bd2b0cc26d39c549bec576389bebd0fd011387d
[ "Apache-2.0" ]
null
null
null
# Turtle-graphics maze renderer: stamps one white square per 'X' cell of a
# character-grid level onto a black 500x500 window.
#
# NOTE(review): this file passed through a whitespace-collapsing dump; runs
# of spaces inside the maze row strings may have been squeezed (rows below
# have unequal lengths). Verify the level strings against the original file.
import turtle

wn = turtle.Screen()
wn.bgcolor("black")
wn.title("Maze")
wn.setup(500, 500)


class Pen(turtle.Turtle):
    # Drawing cursor configured as an invisible-trail stamper:
    # pen up (no lines) and speed 0 (fastest animation).
    def __init__(self):
        turtle.Turtle.__init__(self)
        self.shape("square")
        self.color("white")
        self.penup()
        self.speed(0)
        pass
    pass


# levels[0] is a placeholder so level numbers start at 1.
levels = [""]

level_1 = [
    "XXXXXXXXXXXXXXXXX",
    " X X X",
    "XXX X XXXXXXX X X",
    "X X X X X X X",
    "X X X X XXX X X X",
    "X X X X X X X",
    "X XXXXXXXXX X X X",
    "X X X",
    "XXX XXX XXXXX X X",
    "X X X X X",
    "X XXX X X X X XXX",
    "X X X X X X",
    "X X X X XXXXXXXXX",
    "X X X X X",
    "XXX X XXXXXXXXX X",
    "X X X X",
    "XXXXXXXXXXXXXXX X"
]
levels.append(level_1)


def setup_maze(level):
    # Stamp a square for every 'X' cell. Grid cells are 24px; (0,0) of the
    # grid maps to screen (-233, 233), i.e. top-left of the window area.
    # Uses the module-level `pen` created below.
    for y in range(len(level)):
        for x in range(len(level[y])):
            chachrecter = level[y][x]  # sic: original (misspelled) name kept
            screen_x = -233 + (x * 24)
            screen_y = 233 - (y * 24)
            if (chachrecter == "X"):
                pen.goto(screen_x, screen_y)
                pen.stamp()
            pass
        pass
    pass


pen = Pen()
setup_maze(levels[1])
turtle.done()  # enter the turtle event loop; blocks until window closes
19.688525
43
0.472939
182
1,201
3.032967
0.252747
0.202899
0.255435
0.275362
0.202899
0.190217
0.190217
0.150362
0.150362
0.150362
0
0.027435
0.393006
1,201
61
44
19.688525
0.729767
0
0
0.313725
0
0
0.257903
0
0
0
0
0
0
1
0.039216
false
0.117647
0.019608
0
0.078431
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
4
57a9b5b9ee6431b8be1270f081d95bf45021e044
2,158
py
Python
tests/bugs/core_5217_test.py
FirebirdSQL/firebird-qa
96af2def7f905a06f178e2a80a2c8be4a4b44782
[ "MIT" ]
1
2022-02-05T11:37:13.000Z
2022-02-05T11:37:13.000Z
tests/bugs/core_5217_test.py
FirebirdSQL/firebird-qa
96af2def7f905a06f178e2a80a2c8be4a4b44782
[ "MIT" ]
1
2021-09-03T11:47:00.000Z
2021-09-03T12:42:10.000Z
tests/bugs/core_5217_test.py
FirebirdSQL/firebird-qa
96af2def7f905a06f178e2a80a2c8be4a4b44782
[ "MIT" ]
1
2021-06-30T14:14:16.000Z
2021-06-30T14:14:16.000Z
#coding:utf-8 # # id: bugs.core_5217 # title: ISQL -x may crash while exporting an exception with message text length > 127 bytes # decription: # # tracker_id: CORE-5217 # min_versions: ['2.5.6'] # versions: 2.5.6 # qmid: None import pytest from firebird.qa import db_factory, isql_act, Action # version: 2.5.6 # resources: None substitutions_1 = [] init_script_1 = """""" db_1 = db_factory(sql_dialect=3, init=init_script_1) test_script_1 = """ recreate exception exc_test_a '1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123'; recreate exception exc_test_b '12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234'; recreate exception exc_test_c '123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345'; commit; set list on; set count on; select rdb$exception_name, rdb$message from rdb$exceptions order by rdb$exception_name; """ act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1 = """ RDB$EXCEPTION_NAME EXC_TEST_A RDB$MESSAGE 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 RDB$EXCEPTION_NAME EXC_TEST_B RDB$MESSAGE 12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 RDB$EXCEPTION_NAME EXC_TEST_C RDB$MESSAGE 123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345 Records affected: 3 """ @pytest.mark.version('>=2.5.6') def test_1(act_1: Action): act_1.expected_stdout = expected_stdout_1 act_1.execute() assert act_1.clean_stdout == act_1.clean_expected_stdout
33.71875
171
0.762743
190
2,158
8.389474
0.410526
0.026349
0.050188
0.045169
0.043287
0
0
0
0
0
0
0.477721
0.178406
2,158
63
172
34.253968
0.42132
0.133457
0
0.058824
0
0
0.74098
0.437803
0
0
0
0
0.029412
1
0.029412
false
0
0.058824
0
0.088235
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
57c73355483d10c897bec8a58356b55e5c9abec9
277
py
Python
src/note/serializers/__init__.py
ResearchHub/ResearchHub-Backend-Open
d36dca33afae2d442690694bb2ab17180d84bcd3
[ "MIT" ]
18
2021-05-20T13:20:16.000Z
2022-02-11T02:40:18.000Z
src/note/serializers/__init__.py
ResearchHub/ResearchHub-Backend-Open
d36dca33afae2d442690694bb2ab17180d84bcd3
[ "MIT" ]
109
2021-05-21T20:14:23.000Z
2022-03-31T20:56:10.000Z
src/note/serializers/__init__.py
ResearchHub/ResearchHub-Backend-Open
d36dca33afae2d442690694bb2ab17180d84bcd3
[ "MIT" ]
4
2021-05-17T13:47:53.000Z
2022-02-12T10:48:21.000Z
from note.serializers.note_serializer import ( NoteSerializer, NoteContentSerializer, DynamicNoteSerializer, DynamicNoteContentSerializer, ) from note.serializers.note_template_serializer import ( NoteTemplateSerializer, DynamicNoteTemplateSerializer )
25.181818
55
0.808664
19
277
11.631579
0.631579
0.072398
0.171946
0.208145
0
0
0
0
0
0
0
0
0.144404
277
10
56
27.7
0.932489
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.2
0
0.2
0
1
0
1
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
57e50d184a2c8fb8315458ec64f2672d33bd8900
41
py
Python
Autokey/CapsKeybinds/shift alt/end.py
MisaghM/Capslock-Keybindings
00332c7d39cf776c43fe13aa08e1c2969747425d
[ "MIT" ]
1
2021-11-05T19:39:36.000Z
2021-11-05T19:39:36.000Z
Autokey/CapsKeybinds/shift alt/end.py
MisaghM/Capslock-Keybindings
00332c7d39cf776c43fe13aa08e1c2969747425d
[ "MIT" ]
null
null
null
Autokey/CapsKeybinds/shift alt/end.py
MisaghM/Capslock-Keybindings
00332c7d39cf776c43fe13aa08e1c2969747425d
[ "MIT" ]
null
null
null
# AutoKey binding script: emit Shift+Alt+End when triggered.
# `keyboard` is a global injected by the AutoKey scripting environment.
keyboard.send_keys("<shift>+<alt>+<end>")
41
41
0.682927
6
41
4.5
1
0
0
0
0
0
0
0
0
0
0
0
0
41
1
41
41
0.658537
0
0
0
0
0
0.452381
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
17fe2a1d94fb554d10231a5c9257fb4deef32d05
85
py
Python
ipynb/oscovida/__init__.py
skirienko/oscovida.github.io
eda5412d02365a8a000239be5480512c53bee8c2
[ "CC-BY-4.0" ]
2
2020-06-19T09:16:14.000Z
2021-01-24T17:47:56.000Z
ipynb/oscovida/__init__.py
skirienko/oscovida.github.io
eda5412d02365a8a000239be5480512c53bee8c2
[ "CC-BY-4.0" ]
8
2020-04-20T16:49:49.000Z
2021-12-25T16:54:19.000Z
ipynb/oscovida/__init__.py
skirienko/oscovida.github.io
eda5412d02365a8a000239be5480512c53bee8c2
[ "CC-BY-4.0" ]
4
2020-04-20T13:24:45.000Z
2021-01-29T11:12:12.000Z
# Re-export the package's public API and the metadata registry at top level.
from .oscovida import *
from .metadata import MetadataRegion

# Package version string.
__version__ = "0.1.0"
14.166667
36
0.752941
11
85
5.454545
0.727273
0
0
0
0
0
0
0
0
0
0
0.041667
0.152941
85
5
37
17
0.791667
0
0
0
0
0
0.058824
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
aa02010115495a8c1a2a419b23de12127ca0f5ff
627
py
Python
larlib/larlib/__init__.py
cvdlab/lar-cc
7092965acf7c0c78a5fab4348cf2c2aa01c4b130
[ "MIT", "Unlicense" ]
1
2016-09-20T04:48:12.000Z
2016-09-20T04:48:12.000Z
larlib/larlib/__init__.py
Ahdhn/lar-cc
7092965acf7c0c78a5fab4348cf2c2aa01c4b130
[ "MIT", "Unlicense" ]
1
2018-02-20T21:57:07.000Z
2018-02-21T07:18:11.000Z
larlib/larlib/__init__.py
Ahdhn/lar-cc
7092965acf7c0c78a5fab4348cf2c2aa01c4b130
[ "MIT", "Unlicense" ]
7
2016-11-04T10:47:42.000Z
2018-04-10T17:32:50.000Z
import scipy import pyplasm from scipy import * from pyplasm import * #from triangle import * #from support import * from copy import copy from p2t import * from myfont import * from larstruct import * from lar2psm import * from simplexn import * from largrid import * from boundary import * from larcc import * from integr import * from inters import * from triangulation import * from architectural import * from boolean import * from hijson import * from iot3d import * from hospital import * from mapper import * from morph import * from splines import * from splitcell import * #from support import * from sysml import *
20.225806
27
0.773525
86
627
5.639535
0.325581
0.515464
0.070103
0.094845
0.11134
0
0
0
0
0
0
0.005837
0.180223
627
31
28
20.225806
0.937743
0.102073
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
aa05c0531a15b764d4bcac552c0fcc7851c3cc52
263
py
Python
RESTful_Face_Web/service/base_service.py
luzhoutao/face
ae5910f57ae86412e8a56adbefe4f260dde216f0
[ "MIT" ]
null
null
null
RESTful_Face_Web/service/base_service.py
luzhoutao/face
ae5910f57ae86412e8a56adbefe4f260dde216f0
[ "MIT" ]
null
null
null
RESTful_Face_Web/service/base_service.py
luzhoutao/face
ae5910f57ae86412e8a56adbefe4f260dde216f0
[ "MIT" ]
null
null
null
class BaseService:
    """Abstract base class for services.

    Subclasses must override both methods; the base implementations only
    raise ``NotImplementedError``.  (Messages were also made consistent —
    the originals read "Not Implement yet!" / "Not Implemented yet !".)
    """

    def is_valid_input_data(self, data=None, app=None):
        """Validate *data* in the context of *app*.

        Raises:
            NotImplementedError: always, until overridden by a subclass.
        """
        raise NotImplementedError("is_valid_input_data() not implemented yet!")

    def execute(self, *args, **kwargs):
        """Run the service with arbitrary positional/keyword arguments.

        Raises:
            NotImplementedError: always, until overridden by a subclass.
        """
        raise NotImplementedError("execute() not implemented yet!")
37.571429
77
0.711027
32
263
5.65625
0.59375
0.077348
0.132597
0.176796
0
0
0
0
0
0
0
0
0.174905
263
6
78
43.833333
0.834101
0
0
0
0
0
0.269962
0.079848
0
0
0
0
0
1
0.4
false
0
0
0
0.6
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
aa2dde39f14f8ded882816b141a9b931b7bca0b6
22
py
Python
simplecv/_impl/preprocess/det.py
Bobholamovic/SimpleCV
f4edacf088d0155725a469e227de847820bdfa53
[ "MIT" ]
44
2019-05-12T10:02:23.000Z
2022-01-26T07:30:45.000Z
simplecv/_impl/preprocess/det.py
Z-Zheng/simplecv
4fa67581441ad150e82b3aa2c394a921f74e4ecd
[ "MIT" ]
6
2019-11-05T02:23:18.000Z
2021-06-15T07:06:41.000Z
simplecv/_impl/preprocess/det.py
Bobholamovic/SimpleCV
f4edacf088d0155725a469e227de847820bdfa53
[ "MIT" ]
8
2019-07-07T08:58:20.000Z
2022-03-19T08:57:33.000Z
""" Closed source """
5.5
13
0.545455
2
22
6
1
0
0
0
0
0
0
0
0
0
0
0
0.181818
22
3
14
7.333333
0.666667
0.590909
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
a4b414f09ee057d4f770f86408c2d74131271c8b
56
py
Python
openpnm/models/collections/physics/__init__.py
xu-kai-xu/OpenPNM
61d5fc4729a0a29291cf6c53c07c4246e7a13714
[ "MIT" ]
2
2019-08-24T09:17:40.000Z
2020-07-05T07:21:21.000Z
openpnm/models/collections/physics/__init__.py
xu-kai-xu/OpenPNM
61d5fc4729a0a29291cf6c53c07c4246e7a13714
[ "MIT" ]
null
null
null
openpnm/models/collections/physics/__init__.py
xu-kai-xu/OpenPNM
61d5fc4729a0a29291cf6c53c07c4246e7a13714
[ "MIT" ]
null
null
null
from .basic import basic from .standard import standard
18.666667
30
0.821429
8
56
5.75
0.5
0
0
0
0
0
0
0
0
0
0
0
0.142857
56
2
31
28
0.958333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
a4b65bcb42a6543a702401eb7207adc3d1c60005
207
py
Python
home/admin.py
SmithJesko/volny-films
7c50713eb1d2c2d5984700a5de20a12e4045e1b9
[ "MIT" ]
1
2021-02-23T00:12:43.000Z
2021-02-23T00:12:43.000Z
home/admin.py
SmithJesko/volny-films
7c50713eb1d2c2d5984700a5de20a12e4045e1b9
[ "MIT" ]
null
null
null
home/admin.py
SmithJesko/volny-films
7c50713eb1d2c2d5984700a5de20a12e4045e1b9
[ "MIT" ]
1
2021-02-23T06:04:13.000Z
2021-02-23T06:04:13.000Z
from django.contrib import admin

from .models import Movie


class MovieAdmin(admin.ModelAdmin):
    # Columns shown in the admin change-list view for movies.
    list_display = ('title', 'release_date', 'language', 'popularity')


# Expose the Movie model in the Django admin using the config above.
admin.site.register(Movie, MovieAdmin)
25.875
70
0.763285
25
207
6.24
0.76
0
0
0
0
0
0
0
0
0
0
0
0.115942
207
8
71
25.875
0.852459
0
0
0
0
0
0.168269
0
0
0
0
0
0
1
0
false
0
0.4
0
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
a4bc756b968d0545c4b31366e574c74c4bcf55d9
156
py
Python
ABC/A/0212.py
taro-masuda/AtCoder
e8cb050260c1dff4ef61a27d1a3a2a8029fc939a
[ "MIT" ]
null
null
null
ABC/A/0212.py
taro-masuda/AtCoder
e8cb050260c1dff4ef61a27d1a3a2a8029fc939a
[ "MIT" ]
null
null
null
ABC/A/0212.py
taro-masuda/AtCoder
e8cb050260c1dff4ef61a27d1a3a2a8029fc939a
[ "MIT" ]
null
null
null
# Classify a mixture by its gold/silver content: pure gold, pure silver,
# or an alloy of both.  Reads two integers from stdin.
gold, silver = map(int, input().split())

if gold > 0 and silver == 0:
    print('Gold')
elif gold == 0 and silver > 0:
    print('Silver')
elif gold > 0 and silver > 0:
    print('Alloy')
19.5
32
0.50641
30
156
2.633333
0.466667
0.050633
0.126582
0.253165
0
0
0
0
0
0
0
0.053571
0.282051
156
8
33
19.5
0.651786
0
0
0
0
0
0.095541
0
0
0
0
0
0
1
0
true
0
0
0
0
0.428571
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
4
a4de13c55c29aad0cee482a41d19ebb77a6e2429
68
py
Python
scripts/report/__init__.py
mathias-sm/mne-bids-pipeline
55a8d7c7ca5a254ff7b9af84b818b164692667d5
[ "BSD-3-Clause" ]
null
null
null
scripts/report/__init__.py
mathias-sm/mne-bids-pipeline
55a8d7c7ca5a254ff7b9af84b818b164692667d5
[ "BSD-3-Clause" ]
null
null
null
scripts/report/__init__.py
mathias-sm/mne-bids-pipeline
55a8d7c7ca5a254ff7b9af84b818b164692667d5
[ "BSD-3-Clause" ]
null
null
null
from . import _01_make_reports

# Ordered collection of report-stage script modules run by the pipeline.
SCRIPTS = (
    _01_make_reports,
)
11.333333
30
0.720588
9
68
4.777778
0.666667
0.27907
0.604651
0
0
0
0
0
0
0
0
0.074074
0.205882
68
5
31
13.6
0.722222
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
350717d26b68705eaa4c0c40f77299753abc7b02
1,539
py
Python
pwn/toplevel.py
kristoff3r/pwntools
9d94de956501dcf5f60c5a382c9a52078df99533
[ "MIT" ]
1
2019-09-30T04:00:13.000Z
2019-09-30T04:00:13.000Z
pwn/toplevel.py
kristoff3r/pwntools
9d94de956501dcf5f60c5a382c9a52078df99533
[ "MIT" ]
null
null
null
pwn/toplevel.py
kristoff3r/pwntools
9d94de956501dcf5f60c5a382c9a52078df99533
[ "MIT" ]
null
null
null
# Get all the modules from pwnlib from pwnlib import * # Promote functions from these modules to toplevel from pwnlib.asm import asm, disasm, cpp from pwnlib.context import context from pwnlib.dynelf import DynELF from pwnlib.elf import ELF, load from pwnlib.exception import PwnlibException from pwnlib.memleak import MemLeak from pwnlib.replacements import * from pwnlib.rop import ROP from pwnlib.tubes.listen import listen from pwnlib.tubes.process import process from pwnlib.tubes.remote import remote from pwnlib.tubes.serialtube import serialtube from pwnlib.tubes.ssh import ssh from pwnlib.tubes.timeout import Timeout from pwnlib.tubes.tube import tube from pwnlib.ui import * from pwnlib.util import crc, net, proc, iters from pwnlib.util.cyclic import * from pwnlib.util.fiddling import * from pwnlib.util.hashes import * from pwnlib.util.lists import * from pwnlib.util.misc import * from pwnlib.util.packing import * from pwnlib.util.proc import pidof from pwnlib.util.splash import * from pwnlib.util.web import * # Promote these modules, so that "from pwn import *" will let you access them import \ collections , operator , os , pwn , \ pwnlib , re , string , struct , \ subprocess , sys , threading , time , \ tempfile
40.5
77
0.637427
185
1,539
5.302703
0.356757
0.285423
0.142712
0.163099
0
0
0
0
0
0
0
0
0.309292
1,539
37
78
41.594595
0.92286
0.101365
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.875
0
0.875
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
35304046c7abb5735e3e1420fba412edcb13da3d
27,687
py
Python
opencivicdata/elections/migrations/0001_initial.py
tubaman/python-opencivicdata
010cd72bdd806e76f342195a1f1e20acbed5a431
[ "BSD-3-Clause" ]
null
null
null
opencivicdata/elections/migrations/0001_initial.py
tubaman/python-opencivicdata
010cd72bdd806e76f342195a1f1e20acbed5a431
[ "BSD-3-Clause" ]
null
null
null
opencivicdata/elections/migrations/0001_initial.py
tubaman/python-opencivicdata
010cd72bdd806e76f342195a1f1e20acbed5a431
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- # Generated by Django 1.11.2 on 2017-06-23 15:20 from __future__ import unicode_literals import django.contrib.postgres.fields import django.contrib.postgres.fields.jsonb import django.core.validators from django.db import migrations, models import django.db.models.deletion import opencivicdata.core.models.base import uuid class Migration(migrations.Migration): initial = True dependencies = [ ('core', '0001_initial'), ] operations = [ migrations.CreateModel( name='BallotMeasureContest', fields=[ ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('extras', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict)), ('locked_fields', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), blank=True, default=list, size=None)), ('id', opencivicdata.core.models.base.OCDIDField(help_text='Open Civic Data-style id in the format ``ocd-contest/{{uuid}}``.', ocd_type='contest', serialize=False, validators=[django.core.validators.RegexValidator(flags=32, message='ID must match ^ocd-contest/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', regex='^ocd-contest/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$')])), ('name', models.CharField(help_text='Name of the contest, not necessarily as it appears on the ballot.', max_length=300)), ('description', models.TextField(help_text='Text describing the purpose and/or potential outcomes of the ballot measure, not necessarily as it appears on the ballot.')), ('requirement', models.CharField(blank=True, default='50% plus one vote', help_text='The threshold of votes the ballot measure needs in order to pass.', max_length=300)), ('classification', models.CharField(blank=True, help_text='Describes the origin and/or potential outcome of the ballot measure, e.g., "initiative statute", "legislative constitutional amendment".', max_length=300)), ('division', models.ForeignKey(help_text="Reference to the Division that defines the political 
geography of the contest, e.g., a specific Congressional or State Senate district. Should be a subdivision of the Division referenced by the contest's Election.", on_delete=django.db.models.deletion.CASCADE, related_name='ballotmeasurecontests', related_query_name='ballotmeasurecontests', to='core.Division')), ], options={ 'db_table': 'opencivicdata_ballotmeasurecontest', }, ), migrations.CreateModel( name='BallotMeasureContestIdentifier', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('identifier', models.CharField(max_length=300)), ('scheme', models.CharField(max_length=300)), ('contest', models.ForeignKey(help_text='Reference to the BallotMeasureContest linked to the upstream identifier.', on_delete=django.db.models.deletion.CASCADE, related_name='identifiers', to='elections.BallotMeasureContest')), ], options={ 'db_table': 'opencivicdata_ballotmeasurecontestidentifier', }, ), migrations.CreateModel( name='BallotMeasureContestOption', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('text', models.CharField(help_text='Text of the option, not necessarily as it appears on the ballot.', max_length=300)), ('contest', models.ForeignKey(help_text='Reference to the BallotMeasureContest.', on_delete=django.db.models.deletion.CASCADE, related_name='options', to='elections.BallotMeasureContest')), ], options={ 'db_table': 'opencivicdata_ballotmeasurecontestoption', }, ), migrations.CreateModel( name='BallotMeasureContestSource', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('note', models.CharField(blank=True, max_length=300)), ('url', models.URLField(max_length=2000)), ('contest', models.ForeignKey(help_text='Reference to the BallotMeasureContest assembled from the source.', on_delete=django.db.models.deletion.CASCADE, related_name='sources', to='elections.BallotMeasureContest')), ], options={ 
'db_table': 'opencivicdata_ballotmeasurecontestsource', }, ), migrations.CreateModel( name='Candidacy', fields=[ ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('extras', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict)), ('locked_fields', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), blank=True, default=list, size=None)), ('id', opencivicdata.core.models.base.OCDIDField(help_text='Open Civic Data-style id in the format ``ocd-candidacy/{{uuid}}``.', ocd_type='candidacy', serialize=False, validators=[django.core.validators.RegexValidator(flags=32, message='ID must match ^ocd-candidacy/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', regex='^ocd-candidacy/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$')])), ('candidate_name', models.CharField(help_text="For preserving the candidate's name as it was of the candidacy.", max_length=300)), ('filed_date', models.DateField(help_text='Specifies when the candidate filed for the contest.', null=True)), ('is_incumbent', models.NullBooleanField(help_text='Indicates whether the candidate is seeking re-election to a public office he/she currently holds')), ('registration_status', models.CharField(choices=[('filed', 'Filed'), ('qualified', 'Qualified'), ('withdrawn', 'Withdrawn'), ('write-in', 'Write-in')], help_text='Registration status of the candidate.', max_length=10, null=True)), ], options={ 'verbose_name_plural': 'candidacies', 'db_table': 'opencivicdata_candidacy', 'ordering': ('contest', 'post', 'person'), }, ), migrations.CreateModel( name='CandidacySource', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('note', models.CharField(blank=True, max_length=300)), ('url', models.URLField(max_length=2000)), ('candidacy', models.ForeignKey(help_text='Reference to the assembed Candidacy.', on_delete=django.db.models.deletion.CASCADE, related_name='sources', 
to='elections.Candidacy')), ], options={ 'db_table': 'opencivicdata_candidacysource', }, ), migrations.CreateModel( name='CandidateContest', fields=[ ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('extras', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict)), ('locked_fields', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), blank=True, default=list, size=None)), ('id', opencivicdata.core.models.base.OCDIDField(help_text='Open Civic Data-style id in the format ``ocd-contest/{{uuid}}``.', ocd_type='contest', serialize=False, validators=[django.core.validators.RegexValidator(flags=32, message='ID must match ^ocd-contest/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', regex='^ocd-contest/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$')])), ('name', models.CharField(help_text='Name of the contest, not necessarily as it appears on the ballot.', max_length=300)), ('previous_term_unexpired', models.BooleanField(default=False, help_text='Indicates the previous public office holder vacated the post before serving a full term.')), ('number_elected', models.IntegerField(default=1, help_text="Number of candidates that are elected in the contest, i.e. 'N' of N-of-M.")), ('division', models.ForeignKey(help_text="Reference to the Division that defines the political geography of the contest, e.g., a specific Congressional or State Senate district. 
Should be a subdivision of the Division referenced by the contest's Election.", on_delete=django.db.models.deletion.CASCADE, related_name='candidatecontests', related_query_name='candidatecontests', to='core.Division')), ], options={ 'db_table': 'opencivicdata_candidatecontest', }, ), migrations.CreateModel( name='CandidateContestIdentifier', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('identifier', models.CharField(max_length=300)), ('scheme', models.CharField(max_length=300)), ('contest', models.ForeignKey(help_text='Reference to the CandidateContest linked to the upstream identifier.', on_delete=django.db.models.deletion.CASCADE, related_name='identifiers', to='elections.CandidateContest')), ], options={ 'db_table': 'opencivicdata_candidatecontestidentifier', }, ), migrations.CreateModel( name='CandidateContestPost', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('sort_order', models.IntegerField(default=0, help_text='Useful for sorting for contests where two or more public offices are at stake, e.g., in a U.S. 
presidential contest, the President post would have a lower sort order than the Vice President post.')), ('contest', models.ForeignKey(help_text='Reference to the CandidateContest in which the Post is at stake.', on_delete=django.db.models.deletion.CASCADE, related_name='posts', to='elections.CandidateContest')), ('post', models.ForeignKey(help_text='Reference to the Post at stake in the CandidateContest.', on_delete=django.db.models.deletion.CASCADE, related_name='contests', to='core.Post')), ], options={ 'db_table': 'opencivicdata_candidatecontestpost', 'ordering': ('contest', 'sort_order'), }, ), migrations.CreateModel( name='CandidateContestSource', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('note', models.CharField(blank=True, max_length=300)), ('url', models.URLField(max_length=2000)), ('contest', models.ForeignKey(help_text='Reference to the CandidateContest assembled from the source.', on_delete=django.db.models.deletion.CASCADE, related_name='sources', to='elections.CandidateContest')), ], options={ 'db_table': 'opencivicdata_candidatecontestsource', }, ), migrations.CreateModel( name='Election', fields=[ ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('extras', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict)), ('locked_fields', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), blank=True, default=list, size=None)), ('id', opencivicdata.core.models.base.OCDIDField(help_text='Open Civic Data-style id in the format ``ocd-election/{{uuid}}``.', ocd_type='election', serialize=False, validators=[django.core.validators.RegexValidator(flags=32, message='ID must match ^ocd-election/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', regex='^ocd-election/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$')])), ('name', models.CharField(help_text='Name of the Election.', max_length=300)), ('date', 
models.DateField(help_text="Final or only date when eligible voters may cast their ballots in the Election. Typically this is also the same date when results of the election's contests are first publicly reported.")), ('administrative_organization', models.ForeignKey(help_text='Reference to the Organization that administers the election.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='elections', to='core.Organization')), ('division', models.ForeignKey(help_text='Reference to the Division that defines the broadest political geography of any contest to be decided by the election.', on_delete=django.db.models.deletion.CASCADE, related_name='elections', to='core.Division')), ], options={ 'db_table': 'opencivicdata_election', 'ordering': ('-date',), }, ), migrations.CreateModel( name='ElectionIdentifier', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('identifier', models.CharField(max_length=300)), ('scheme', models.CharField(max_length=300)), ('election', models.ForeignKey(help_text='Reference to the Election identified by the identifier.', on_delete=django.db.models.deletion.CASCADE, related_name='identifiers', to='elections.Election')), ], options={ 'db_table': 'opencivicdata_electionidentifier', }, ), migrations.CreateModel( name='ElectionSource', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('note', models.CharField(blank=True, max_length=300)), ('url', models.URLField(max_length=2000)), ('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sources', to='elections.Election')), ], options={ 'db_table': 'opencivicdata_electionsource', }, ), migrations.CreateModel( name='PartyContest', fields=[ ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('extras', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict)), 
('locked_fields', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), blank=True, default=list, size=None)), ('id', opencivicdata.core.models.base.OCDIDField(help_text='Open Civic Data-style id in the format ``ocd-contest/{{uuid}}``.', ocd_type='contest', serialize=False, validators=[django.core.validators.RegexValidator(flags=32, message='ID must match ^ocd-contest/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', regex='^ocd-contest/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$')])), ('name', models.CharField(help_text='Name of the contest, not necessarily as it appears on the ballot.', max_length=300)), ('division', models.ForeignKey(help_text="Reference to the Division that defines the political geography of the contest, e.g., a specific Congressional or State Senate district. Should be a subdivision of the Division referenced by the contest's Election.", on_delete=django.db.models.deletion.CASCADE, related_name='partycontests', related_query_name='partycontests', to='core.Division')), ('election', models.ForeignKey(help_text='Reference to the Election in which the contest is decided.', on_delete=django.db.models.deletion.CASCADE, related_name='partycontests', related_query_name='partycontests', to='elections.Election')), ('runoff_for_contest', models.OneToOneField(help_text='If this contest is a runoff to determine the outcome of a previously undecided contest, reference to that PartyContest.', null=True, on_delete=django.db.models.deletion.CASCADE, to='elections.PartyContest')), ], options={ 'db_table': 'opencivicdata_partycontest', }, ), migrations.CreateModel( name='PartyContestIdentifier', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('identifier', models.CharField(max_length=300)), ('scheme', models.CharField(max_length=300)), ('contest', models.ForeignKey(help_text='Reference to the PartyContest linked to the upstream identifier.', on_delete=django.db.models.deletion.CASCADE, 
related_name='identifiers', to='elections.PartyContest')), ], options={ 'db_table': 'opencivicdata_partyidentifier', }, ), migrations.CreateModel( name='PartyContestOption', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('is_incumbent', models.NullBooleanField(help_text='Indicates whether the party currently holds majority power.')), ('contest', models.ForeignKey(help_text='Reference to the PartyContest in which the Party is an option.', on_delete=django.db.models.deletion.CASCADE, related_name='parties', to='elections.PartyContest')), ('party', models.ForeignKey(help_text='Reference to the Party option in the PartyContest.', on_delete=django.db.models.deletion.CASCADE, related_name='party_contests', to='core.Organization')), ], options={ 'db_table': 'opencivicdata_partycontestoption', 'ordering': ('contest', 'party'), }, ), migrations.CreateModel( name='PartyContestSource', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('note', models.CharField(blank=True, max_length=300)), ('url', models.URLField(max_length=2000)), ('contest', models.ForeignKey(help_text='Reference to the PartyContest assembled from the source.', on_delete=django.db.models.deletion.CASCADE, related_name='sources', to='elections.PartyContest')), ], options={ 'db_table': 'opencivicdata_partysource', }, ), migrations.CreateModel( name='RetentionContest', fields=[ ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('extras', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict)), ('locked_fields', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), blank=True, default=list, size=None)), ('id', opencivicdata.core.models.base.OCDIDField(help_text='Open Civic Data-style id in the format ``ocd-contest/{{uuid}}``.', ocd_type='contest', serialize=False, 
validators=[django.core.validators.RegexValidator(flags=32, message='ID must match ^ocd-contest/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', regex='^ocd-contest/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$')])), ('name', models.CharField(help_text='Name of the contest, not necessarily as it appears on the ballot.', max_length=300)), ('description', models.TextField(help_text='Text describing the purpose and/or potential outcomes of the contest, not necessarily as it appears on the ballot.')), ('requirement', models.CharField(blank=True, default='50% plus one vote', help_text='The threshold of votes need in order to retain the officeholder.', max_length=300)), ('division', models.ForeignKey(help_text="Reference to the Division that defines the political geography of the contest, e.g., a specific Congressional or State Senate district. Should be a subdivision of the Division referenced by the contest's Election.", on_delete=django.db.models.deletion.CASCADE, related_name='retentioncontests', related_query_name='retentioncontests', to='core.Division')), ('election', models.ForeignKey(help_text='Reference to the Election in which the contest is decided.', on_delete=django.db.models.deletion.CASCADE, related_name='retentioncontests', related_query_name='retentioncontests', to='elections.Election')), ('membership', models.ForeignKey(help_text='Reference to the Membership that represents the tenure of a person in a specific public office.', on_delete=django.db.models.deletion.CASCADE, to='core.Membership')), ('runoff_for_contest', models.OneToOneField(help_text='If this contest is a runoff to determine the outcome of a previously undecided contest, reference to that RetentionContest.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='runoff_contest', to='elections.RetentionContest')), ], options={ 'db_table': 'opencivicdata_retentioncontest', }, ), migrations.CreateModel( name='RetentionContestIdentifier', fields=[ ('id', 
models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('identifier', models.CharField(max_length=300)), ('scheme', models.CharField(max_length=300)), ('contest', models.ForeignKey(help_text='Reference to the RetentionContest linked to the upstream identifier.', on_delete=django.db.models.deletion.CASCADE, related_name='identifiers', to='elections.RetentionContest')), ], options={ 'db_table': 'opencivicdata_retentionidentifier', }, ), migrations.CreateModel( name='RetentionContestOption', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('text', models.CharField(help_text='Text of the option, not necessarily as it appears on the ballot.', max_length=300)), ('contest', models.ForeignKey(help_text='Reference to the RetentionContest.', on_delete=django.db.models.deletion.CASCADE, related_name='options', to='elections.RetentionContest')), ], options={ 'db_table': 'opencivicdata_retentioncontestoption', }, ), migrations.CreateModel( name='RetentionContestSource', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('note', models.CharField(blank=True, max_length=300)), ('url', models.URLField(max_length=2000)), ('contest', models.ForeignKey(help_text='Reference to the RetentionContest assembled from the source.', on_delete=django.db.models.deletion.CASCADE, related_name='sources', to='elections.RetentionContest')), ], options={ 'db_table': 'opencivicdata_retentionsource', }, ), migrations.AddField( model_name='candidatecontest', name='election', field=models.ForeignKey(help_text='Reference to the Election in which the contest is decided.', on_delete=django.db.models.deletion.CASCADE, related_name='candidatecontests', related_query_name='candidatecontests', to='elections.Election'), ), migrations.AddField( model_name='candidatecontest', name='party', field=models.ForeignKey(help_text='If the contest is among candidates of the 
same political party, e.g., a partisan primary election, reference to the Party.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='candidate_contests', to='core.Organization'), ), migrations.AddField( model_name='candidatecontest', name='runoff_for_contest', field=models.OneToOneField(help_text='If this contest is a runoff to determine the outcome of a previously undecided contest, reference to that CandidateContest.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='runoff_contest', to='elections.CandidateContest'), ), migrations.AddField( model_name='candidacy', name='contest', field=models.ForeignKey(help_text='Reference to an OCD CandidateContest representing the contest in which the candidate is competing.', on_delete=django.db.models.deletion.CASCADE, related_name='candidacies', to='elections.CandidateContest'), ), migrations.AddField( model_name='candidacy', name='party', field=models.ForeignKey(help_text='Reference to and Party with which the candidate is affiliated.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='candidacies', to='core.Organization'), ), migrations.AddField( model_name='candidacy', name='person', field=models.ForeignKey(help_text='Reference to the Person who is the candidate.', on_delete=django.db.models.deletion.CASCADE, related_name='candidacies', to='core.Person'), ), migrations.AddField( model_name='candidacy', name='post', field=models.ForeignKey(help_text='Reference to Post represents the public office for which the candidate is competing.', on_delete=django.db.models.deletion.CASCADE, related_name='candidacies', to='core.Post'), ), migrations.AddField( model_name='candidacy', name='top_ticket_candidacy', field=models.ForeignKey(help_text='If the candidate is running as part of ticket, e.g., a Vice Presidential candidate running with a Presidential candidate, reference to candidacy at the top of the ticket.', null=True, on_delete=django.db.models.deletion.CASCADE, 
related_name='ticket', to='elections.Candidacy'), ), migrations.AddField( model_name='ballotmeasurecontest', name='election', field=models.ForeignKey(help_text='Reference to the Election in which the contest is decided.', on_delete=django.db.models.deletion.CASCADE, related_name='ballotmeasurecontests', related_query_name='ballotmeasurecontests', to='elections.Election'), ), migrations.AddField( model_name='ballotmeasurecontest', name='runoff_for_contest', field=models.OneToOneField(help_text='If this contest is a runoff to determine the outcome of a previously undecided contest, reference to that BallotMeasureContest.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='runoff_contest', to='elections.BallotMeasureContest'), ), ]
76.272727
422
0.650197
3,106
27,687
5.684804
0.106568
0.028997
0.030923
0.048593
0.762644
0.754035
0.740726
0.668687
0.663929
0.621906
0
0.013341
0.217575
27,687
362
423
76.483425
0.801736
0.002456
0
0.59887
1
0.079096
0.367178
0.084408
0
0
0
0
0
1
0
false
0.002825
0.022599
0
0.033898
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
35429557af1d81d6c41c3e4be99bea463e9efaab
261
py
Python
fablwriter/masal/admin.py
sevilayerkan/fabl-writer
f9f4f93c3f929564b792023910547f377b2ed2d6
[ "MIT" ]
4
2018-07-29T07:21:01.000Z
2018-08-03T15:40:48.000Z
fablwriter/masal/admin.py
sevilayerkan/fabl-writer
f9f4f93c3f929564b792023910547f377b2ed2d6
[ "MIT" ]
1
2018-08-03T08:01:46.000Z
2018-08-03T08:01:46.000Z
fablwriter/masal/admin.py
sevilayerkan/fabl-writer
f9f4f93c3f929564b792023910547f377b2ed2d6
[ "MIT" ]
7
2018-07-29T07:32:12.000Z
2018-08-01T14:09:42.000Z
from django.contrib import admin from masal.models import Fabl, Baglam, Sahne, Published # Modelleri admin sayfasında yönetebilmek için kayıt ettik admin.site.register(Fabl) admin.site.register(Baglam) admin.site.register(Sahne) admin.site.register(Published)
29
58
0.823755
36
261
5.972222
0.527778
0.167442
0.316279
0
0
0
0
0
0
0
0
0
0.091954
261
8
59
32.625
0.907173
0.214559
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
354991f737e8c45c6ea9c3ad821976e1b5481d51
143
py
Python
app/app/calc.py
hammott/Django-Doker-Restful-E_Commerce
4df8b2c7f79690364978dfbbaba5457799c5bf15
[ "MIT" ]
null
null
null
app/app/calc.py
hammott/Django-Doker-Restful-E_Commerce
4df8b2c7f79690364978dfbbaba5457799c5bf15
[ "MIT" ]
null
null
null
app/app/calc.py
hammott/Django-Doker-Restful-E_Commerce
4df8b2c7f79690364978dfbbaba5457799c5bf15
[ "MIT" ]
null
null
null
def add(x,y): """ADD TOW NUMBER TOGETHER""" return x+y def subtract(x,y): """Subtract x from y and return value""" return y-x
17.875
44
0.594406
25
143
3.4
0.48
0.070588
0
0
0
0
0
0
0
0
0
0
0.251748
143
8
45
17.875
0.794393
0.405594
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
1040b08c5a880aa0e2554df21980b5db1737c0d0
204
py
Python
20_unit_testing/lectures/2_testing_errors/functions.py
gdia/The-Complete-Python-Course
ed375b65242249bc749c3e292a6149f8528b9dcf
[ "MIT" ]
29
2019-09-02T21:15:59.000Z
2022-01-14T02:20:05.000Z
20_unit_testing/lectures/2_testing_errors/functions.py
gdia/The-Complete-Python-Course
ed375b65242249bc749c3e292a6149f8528b9dcf
[ "MIT" ]
2
2020-08-20T05:48:36.000Z
2021-06-02T03:16:31.000Z
20_unit_testing/lectures/2_testing_errors/functions.py
gdia/The-Complete-Python-Course
ed375b65242249bc749c3e292a6149f8528b9dcf
[ "MIT" ]
38
2019-10-20T14:29:12.000Z
2022-03-27T19:50:05.000Z
from typing import Union def divide(dividend: Union[int, float], divisor: Union[int, float]): if divisor == 0: raise ValueError("The divisor cannot be zero.") return dividend / divisor
22.666667
68
0.681373
27
204
5.148148
0.703704
0.115108
0.18705
0
0
0
0
0
0
0
0
0.00625
0.215686
204
8
69
25.5
0.8625
0
0
0
0
0
0.132353
0
0
0
0
0
0
1
0.2
false
0
0.2
0
0.6
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
104b1ba2976bd679cca110939f3b091bd99b9c78
91
py
Python
linkpage/apps.py
strassan/www.kaifeck.de
b54698e8e57eaedbc4cb4ea3335fc719df92108f
[ "MIT" ]
2
2020-07-29T17:35:31.000Z
2021-06-06T11:37:49.000Z
linkpage/apps.py
strassan/www.kaifeck.de
b54698e8e57eaedbc4cb4ea3335fc719df92108f
[ "MIT" ]
4
2020-10-28T19:06:02.000Z
2021-01-11T16:06:46.000Z
linkpage/apps.py
strassan/www.kaifeck.de
b54698e8e57eaedbc4cb4ea3335fc719df92108f
[ "MIT" ]
null
null
null
from django.apps import AppConfig class LinkpageConfig(AppConfig): name = 'linkpage'
15.166667
33
0.758242
10
91
6.9
0.9
0
0
0
0
0
0
0
0
0
0
0
0.164835
91
5
34
18.2
0.907895
0
0
0
0
0
0.087912
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
1058ec1ace778fcf6e54547f34e91148d4e621f4
2,601
py
Python
trioasm/whatshap/tests/testreadselect.py
shilpagarg/WHdenovo
7a03798397ee0f131f100402d12ad53eab4334dc
[ "MIT" ]
45
2019-03-18T06:57:23.000Z
2021-06-24T12:24:48.000Z
trioasm/whatshap/tests/testreadselect.py
shilpagarg/WHdenovo
7a03798397ee0f131f100402d12ad53eab4334dc
[ "MIT" ]
2
2019-05-06T22:11:22.000Z
2020-01-10T15:14:40.000Z
trioasm/whatshap/tests/testreadselect.py
shilpagarg/WHdenovo
7a03798397ee0f131f100402d12ad53eab4334dc
[ "MIT" ]
7
2019-05-06T22:07:47.000Z
2020-12-11T08:48:26.000Z
from whatshap.core import readselection from .phasingutils import string_to_readset def test_selection(): reads = string_to_readset(""" 1 1 00 0 1 10 1 1 1 11 0 1 1 1 """) selected_reads = readselection(reads, max_cov = 1, bridging = False) assert selected_reads == set([1,5]) selected_reads = readselection(reads, max_cov = 2, bridging = False) assert selected_reads == set([1,3,5]), str(selected_reads) selected_reads = readselection(reads, max_cov = 3, bridging = False) assert selected_reads == set([1,3,5,7]), str(selected_reads) selected_reads = readselection(reads, max_cov = 3, bridging = True) #Here the assert is wrong, because the bridging doesn't come into account , because in the slice_read the selected # reads have already coverage 3 by set ([1,3,5,7]) because first each position has to covered at least once before #the bridging starts assert selected_reads == set([1,3,5,7]), str(selected_reads) def test_selection2(): reads = string_to_readset(""" 1111 111 1 111 1 11 1 11 """) selected_reads = readselection(reads, max_cov = 4, bridging = False) assert selected_reads == set([0,1,2,3]), str(selected_reads) def test_bridging(): reads = string_to_readset(""" 11 00 11 00 11 00 1 1 """) selected_reads = readselection(reads, max_cov = 2, bridging= False) assert selected_reads == set([0,1,2,3,4,5]) selected_reads = readselection(reads, max_cov = 2, bridging= True) #Not sure why 0 is there selected and not 1... 
assert selected_reads == set([0,3,5,6]) ###Component comparison does not work def test_components_of_readselection(): reads = string_to_readset(""" 111 000 00 00 1 1 """) selected_reads = readselection(reads, max_cov = 2, bridging= False) assert selected_reads == set([0,1,2,3]), str(selected_reads) # assert len(set(new_components.values())) == 2 selected_reads = readselection(reads, max_cov = 2, bridging= True) assert selected_reads == set([0,1,4]), str(selected_reads) # assert len(set(new_components.values())) == 1 #TODO: the below test case seems to be incomplete #def test_tuple_scores(): #'Only example at the moment ' #reads = """ #1 11010 #00 00101 #001 01110 #1 111 #""" #weights = """ #2 13112 #11 23359 #223 56789 #2 111 #""" #rs = string_to_readset(reads, weights) #selected_reads, skipped_reads = readselection(reads, max_cov = 2, bridging= False) #selected_reads, skipped_reads = readselection(reads, max_cov = 2, bridging= True)
27.670213
116
0.67474
384
2,601
4.408854
0.260417
0.207324
0.149439
0.168931
0.585351
0.559362
0.523331
0.502067
0.473715
0.36267
0
0.079981
0.206844
2,601
93
117
27.967742
0.740669
0.31411
0
0.701754
0
0
0.135535
0
0
0
0
0.010753
0.157895
1
0.070175
false
0
0.035088
0
0.105263
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
4
1086f7cac056501e449d25cb720006421a6f2ccd
50
py
Python
litterbox/feeds/__init__.py
rwightman/tensorflow-litterbox
ddeeb3a6c7de64e5391050ffbb5948feca65ad3c
[ "Apache-2.0" ]
49
2016-09-09T15:31:36.000Z
2022-03-09T09:43:52.000Z
litterbox/feeds/__init__.py
TangxinKevin/tensorflow-litterbox
ddeeb3a6c7de64e5391050ffbb5948feca65ad3c
[ "Apache-2.0" ]
1
2017-06-09T07:24:16.000Z
2017-06-09T15:28:11.000Z
litterbox/feeds/__init__.py
TangxinKevin/tensorflow-litterbox
ddeeb3a6c7de64e5391050ffbb5948feca65ad3c
[ "Apache-2.0" ]
29
2016-09-20T07:29:54.000Z
2021-09-28T08:03:49.000Z
from .image.feed_image import FeedImagesWithLabels
50
50
0.9
6
50
7.333333
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.06
50
1
50
50
0.93617
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
109b0ec55f954afc1f30b1db4623cc95c33c4b1b
1,262
py
Python
prxgt/config.py
praxigento/teq_test_db_schema_attrs
20ec030dc095c644d22631491e066697203d983d
[ "MIT" ]
null
null
null
prxgt/config.py
praxigento/teq_test_db_schema_attrs
20ec030dc095c644d22631491e066697203d983d
[ "MIT" ]
null
null
null
prxgt/config.py
praxigento/teq_test_db_schema_attrs
20ec030dc095c644d22631491e066697203d983d
[ "MIT" ]
null
null
null
__author__ = 'Alex Gusev <alex@flancer64.com>' import json import logging class Config: _filename = None _data = None def __init__(self, filename='config.json'): self._filename = filename def load(self): cfg_file = open(self._filename) self._data = json.load(cfg_file) logging.info("configuration is loaded from file '%s';", self._filename) cfg_file.close() def get_dom_attrs_total(self): return self._data['domain']['attrs_total'] def get_dom_attrs_per_instance_min(self): return self._data['domain']['attrs_per_instance_min'] def get_dom_attrs_per_instance_max(self): return self._data['domain']['attrs_per_instance_max'] def get_dom_inst_total(self): return self._data['domain']['instances_total'] def get_oper_inst_count(self): return self._data['operations']['get_instance']['count'] def get_oper_filter_count(self): return self._data['operations']['get_by_filter']['count'] def get_oper_filter_attrs_max(self): return self._data['operations']['get_by_filter']['attrs_in_filter_max'] def get_oper_filter_attrs_min(self): return self._data['operations']['get_by_filter']['attrs_in_filter_min']
30.047619
79
0.68859
171
1,262
4.637427
0.263158
0.090794
0.141236
0.181589
0.576293
0.47541
0.332913
0.281211
0.131148
0.131148
0
0.001947
0.186212
1,262
41
80
30.780488
0.770204
0
0
0
0
0
0.248811
0.034865
0
0
0
0
0
1
0.344828
false
0
0.068966
0.275862
0.793103
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
10a3bea385c89f3f8d9ed67463902ffb42d28244
7,482
py
Python
home/migrations/0003_auto_20210707_0702.py
ChrisMarsh82/iogt
8141421a79b73bd038880a3be92fa6809adced13
[ "BSD-2-Clause" ]
20
2021-04-29T12:36:25.000Z
2022-03-27T12:17:41.000Z
home/migrations/0003_auto_20210707_0702.py
ChrisMarsh82/iogt
8141421a79b73bd038880a3be92fa6809adced13
[ "BSD-2-Clause" ]
892
2021-02-02T13:56:06.000Z
2022-03-31T11:25:44.000Z
home/migrations/0003_auto_20210707_0702.py
ChrisMarsh82/iogt
8141421a79b73bd038880a3be92fa6809adced13
[ "BSD-2-Clause" ]
28
2021-02-19T19:28:37.000Z
2022-03-11T11:46:00.000Z
# Generated by Django 3.1.13 on 2021-07-07 07:02 from django.db import migrations, models import django.db.models.deletion import modelcluster.contrib.taggit import modelcluster.fields class Migration(migrations.Migration): initial = True dependencies = [ ('taggit', '0003_taggeditem_add_unique_index'), ('wagtailmenus', '0023_remove_use_specific'), ('wagtailsvg', '0002_svg_edit_code'), ('home', '0002_auto_20210707_0742'), ('wagtailimages', '0022_uploadedimage'), ('questionnaires', '0001_initial'), ('wagtailcore', '0059_apply_collection_ordering'), ] operations = [ migrations.AddField( model_name='sitesettings', name='registration_survey', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='questionnaires.survey'), ), migrations.AddField( model_name='sitesettings', name='site', field=models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.site'), ), migrations.AddField( model_name='sectiontaggeditem', name='content_object', field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_items', to='home.section'), ), migrations.AddField( model_name='sectiontaggeditem', name='tag', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='home_sectiontaggeditem_items', to='taggit.tag'), ), migrations.AddField( model_name='section', name='icon', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='+', to='wagtailsvg.svg'), ), migrations.AddField( model_name='section', name='lead_image', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='+', to='wagtailimages.image'), ), migrations.AddField( model_name='section', name='tags', field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='home.SectionTaggedItem', to='taggit.Tag', verbose_name='Tags'), ), 
migrations.AddField( model_name='iogtflatmenuitem', name='icon', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailsvg.svg'), ), migrations.AddField( model_name='iogtflatmenuitem', name='link_page', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.page', verbose_name='link to an internal page'), ), migrations.AddField( model_name='iogtflatmenuitem', name='menu', field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='iogt_flat_menu_items', to='wagtailmenus.flatmenu'), ), migrations.AddField( model_name='homepagebanner', name='banner_page', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.bannerpage'), ), migrations.AddField( model_name='homepagebanner', name='source', field=modelcluster.fields.ParentalKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='home_page_banners', to='wagtailcore.page'), ), migrations.AddField( model_name='featuredcontent', name='content', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.page'), ), migrations.AddField( model_name='featuredcontent', name='source', field=modelcluster.fields.ParentalKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='featured_content', to='wagtailcore.page'), ), migrations.AddField( model_name='cachesettings', name='site', field=models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.site'), ), migrations.AddField( model_name='bannerpage', name='banner_background_image', field=models.ForeignKey(blank=True, help_text='Background image', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='+', to='wagtailimages.image'), ), migrations.AddField( model_name='bannerpage', name='banner_icon_button', field=models.ForeignKey(blank=True, help_text='Icon Button', null=True, 
on_delete=django.db.models.deletion.PROTECT, related_name='+', to='wagtailimages.image'), ), migrations.AddField( model_name='bannerpage', name='banner_image', field=models.ForeignKey(blank=True, help_text='Image to display as the banner', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='+', to='wagtailimages.image'), ), migrations.AddField( model_name='bannerpage', name='banner_link_page', field=models.ForeignKey(blank=True, help_text='Optional page to which the banner will link to', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='banners', to='wagtailcore.page'), ), migrations.AddField( model_name='articletaggeditem', name='content_object', field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_items', to='home.article'), ), migrations.AddField( model_name='articletaggeditem', name='tag', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='home_articletaggeditem_items', to='taggit.tag'), ), migrations.AddField( model_name='articlerecommendation', name='article', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.article'), ), migrations.AddField( model_name='articlerecommendation', name='source', field=modelcluster.fields.ParentalKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='recommended_articles', to='home.article'), ), migrations.AddField( model_name='article', name='icon', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailsvg.svg'), ), migrations.AddField( model_name='article', name='lead_image', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='+', to='wagtailimages.image'), ), migrations.AddField( model_name='article', name='tags', field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', 
through='home.ArticleTaggedItem', to='taggit.Tag', verbose_name='Tags'), ), ]
48.270968
211
0.641406
779
7,482
6.001284
0.16303
0.044492
0.127914
0.15016
0.827807
0.826952
0.717219
0.660749
0.611551
0.580535
0
0.009715
0.229618
7,482
154
212
48.584416
0.801353
0.006148
0
0.673469
1
0
0.214017
0.042507
0.020408
0
0
0
0
1
0
false
0
0.027211
0
0.054422
0
0
0
0
null
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
52afbae4ebd5a8692782dda360e198ca009fd285
221
py
Python
users/urls.py
frankdevelopero/django-frikr
1845ea0a571f0285afc042618aacb6d7d87d7353
[ "MIT" ]
null
null
null
users/urls.py
frankdevelopero/django-frikr
1845ea0a571f0285afc042618aacb6d7d87d7353
[ "MIT" ]
null
null
null
users/urls.py
frankdevelopero/django-frikr
1845ea0a571f0285afc042618aacb6d7d87d7353
[ "MIT" ]
null
null
null
from django.urls import path from users.views import LoginView, LogoutView urlpatterns = [ path('login/', LoginView.as_view(), name='users_login'), path('logout/', LogoutView.as_view(), name='users_logout'), ]
22.1
63
0.710407
28
221
5.464286
0.535714
0.078431
0.130719
0.196078
0
0
0
0
0
0
0
0
0.135747
221
9
64
24.555556
0.801047
0
0
0
0
0
0.162896
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
52d7b91dcb15c25ba4e5cbb548e00f83cbcd0209
127
py
Python
logical/converter/qiskit/extensions/simulator/__init__.py
malcolmregan/GateCircuit-to-AnnealerEmbedding
33a1a4ea2ebd707ade0677e0df468d5120a861db
[ "Apache-2.0" ]
null
null
null
logical/converter/qiskit/extensions/simulator/__init__.py
malcolmregan/GateCircuit-to-AnnealerEmbedding
33a1a4ea2ebd707ade0677e0df468d5120a861db
[ "Apache-2.0" ]
1
2019-04-09T02:22:38.000Z
2019-04-09T02:22:38.000Z
logical/converter/qiskit/extensions/simulator/__init__.py
malcolmregan/GateCircuit-to-AnnealerEmbedding
33a1a4ea2ebd707ade0677e0df468d5120a861db
[ "Apache-2.0" ]
null
null
null
from .load import load from .noise import noise from .save import save from .snapshot import snapshot from .wait import wait
15.875
30
0.787402
20
127
5
0.35
0
0
0
0
0
0
0
0
0
0
0
0.173228
127
7
31
18.142857
0.952381
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
52f27a77dc8ab379af9200191eaf5b5df9c99cbe
853
py
Python
app/admin/forms.py
lemocla/business-analysis-project
663de29deb242f028b6defe75d7174a8c37ff596
[ "MIT" ]
10
2022-03-02T19:52:46.000Z
2022-03-29T16:58:12.000Z
app/admin/forms.py
Code-Institute-Community/dlr-project
0073a03adbd94b335df84e8996187de09297644b
[ "MIT" ]
48
2022-03-02T16:45:55.000Z
2022-03-29T19:03:08.000Z
app/admin/forms.py
Code-Institute-Community/dlr-project
0073a03adbd94b335df84e8996187de09297644b
[ "MIT" ]
9
2022-03-02T20:41:27.000Z
2022-03-27T16:37:20.000Z
from wtforms import form, fields class UserForm(form.Form): username = fields.StringField('Userame') email = fields.StringField('Email') password = fields.StringField('Password') is_active = fields.BooleanField('Is active', default=True) is_admin = fields.BooleanField('Is admin', default=False) class OrganisationForm(form.Form): organisation_name = fields.StringField('Organisation Name') latitude = fields.StringField('Latitude') longitude = fields.StringField('Longitude') nace_1 = fields.StringField('Nace 1') nace_1_label = fields.StringField('Nace 1 Label') nace_2 = fields.StringField('Nace 2') nace_2_label = fields.StringField('Nace 2 Label') nace_3 = fields.StringField('Nace 3') nace_3_label = fields.StringField('Nace 3 Label') web_address = fields.StringField('Web Address')
37.086957
63
0.723329
103
853
5.864078
0.300971
0.365894
0.208609
0.129139
0
0
0
0
0
0
0
0.016667
0.15592
853
22
64
38.772727
0.822222
0
0
0
0
0
0.159437
0
0
0
0
0
0
1
0
false
0.055556
0.055556
0
1
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
4
52f8da0487fd9240c62f6d9446427e4935fb5796
1,953
py
Python
isi_sdk/apis/__init__.py
Atomicology/isilon_sdk_python
91039da803ae37ed4abf8d2a3f59c333f3ef1866
[ "MIT" ]
null
null
null
isi_sdk/apis/__init__.py
Atomicology/isilon_sdk_python
91039da803ae37ed4abf8d2a3f59c333f3ef1866
[ "MIT" ]
null
null
null
isi_sdk/apis/__init__.py
Atomicology/isilon_sdk_python
91039da803ae37ed4abf8d2a3f59c333f3ef1866
[ "MIT" ]
null
null
null
from __future__ import absolute_import # import apis into api package from .antivirus_api import AntivirusApi from .audit_api import AuditApi from .auth_api import AuthApi from .auth_groups_api import AuthGroupsApi from .auth_providers_api import AuthProvidersApi from .auth_roles_api import AuthRolesApi from .auth_users_api import AuthUsersApi from .cloud_api import CloudApi from .cluster_api import ClusterApi from .cluster_nodes_api import ClusterNodesApi from .debug_api import DebugApi from .dedupe_api import DedupeApi from .event_api import EventApi from .file_filter_api import FileFilterApi from .filepool_api import FilepoolApi from .filesystem_api import FilesystemApi from .fsa_api import FsaApi from .fsa_results_api import FsaResultsApi from .hardening_api import HardeningApi from .hardware_api import HardwareApi from .job_api import JobApi from .license_api import LicenseApi from .local_api import LocalApi from .network_api import NetworkApi from .network_groupnets_api import NetworkGroupnetsApi from .network_groupnets_subnets_api import NetworkGroupnetsSubnetsApi from .protocols_api import ProtocolsApi from .protocols_hdfs_api import ProtocolsHdfsApi from .quota_api import QuotaApi from .quota_quotas_api import QuotaQuotasApi from .quota_reports_api import QuotaReportsApi from .remotesupport_api import RemotesupportApi from .snapshot_api import SnapshotApi from .snapshot_changelists_api import SnapshotChangelistsApi from .snapshot_snapshots_api import SnapshotSnapshotsApi from .statistics_api import StatisticsApi from .storagepool_api import StoragepoolApi from .sync_api import SyncApi from .sync_policies_api import SyncPoliciesApi from .sync_reports_api import SyncReportsApi from .sync_target_api import SyncTargetApi from .upgrade_api import UpgradeApi from .upgrade_cluster_api import UpgradeClusterApi from .worm_api import WormApi from .zones_api import ZonesApi from .zones_summary_api import ZonesSummaryApi
39.06
69
0.876088
260
1,953
6.307692
0.380769
0.252439
0.019512
0
0
0
0
0
0
0
0
0
0.099846
1,953
49
70
39.857143
0.932878
0.014337
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
eac044d3dad030045129e65d187d339931bd2b66
307
py
Python
sdb/tests/sdb_helpers_test.py
gongchengshi/aws
d04d42739e026d2e99936dd046be05293e063e08
[ "MIT" ]
null
null
null
sdb/tests/sdb_helpers_test.py
gongchengshi/aws
d04d42739e026d2e99936dd046be05293e063e08
[ "MIT" ]
null
null
null
sdb/tests/sdb_helpers_test.py
gongchengshi/aws
d04d42739e026d2e99936dd046be05293e063e08
[ "MIT" ]
null
null
null
from aws import USWest2 import aws.sdb sdb = USWest2.sdb() domain = sdb.lookup('crawled-urls.siemens17042013') # domain = sdb.lookup('logs.siemens17042013') # domain = sdb.lookup('skipped-urls.siemens17042013') # domain = sdb.lookup('failed-urls.siemens17042013') count = aws.sdb.count(domain) print count
27.909091
53
0.758958
40
307
5.825
0.375
0.154506
0.257511
0.386266
0.291845
0
0
0
0
0
0
0.122744
0.09772
307
10
54
30.7
0.718412
0.47557
0
0
0
0
0.178344
0.178344
0
0
0
0
0
0
null
null
0
0.333333
null
null
0.166667
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
4
ead5ca5370b8295da1a86c6827d80fc14bdedda0
80
py
Python
src/AlgoPlus/CTP/__init__.py
yutiansut/AlgoPlus
64259a3946c5283d226aabd7c3084d44c837a2c6
[ "MIT" ]
1
2019-11-01T03:35:33.000Z
2019-11-01T03:35:33.000Z
src/AlgoPlus/CTP/__init__.py
TouQi/AlgoPlus
c7f1b5f7692d357afae5a55e33c922fa55da5c78
[ "MIT" ]
null
null
null
src/AlgoPlus/CTP/__init__.py
TouQi/AlgoPlus
c7f1b5f7692d357afae5a55e33c922fa55da5c78
[ "MIT" ]
1
2021-12-22T16:15:13.000Z
2021-12-22T16:15:13.000Z
# encoding:utf-8 # AlgoPlus量化投资开源框架范例 # 微信公众号:AlgoPlus # 项目官网:http://algo.plus
13.333333
23
0.7375
10
80
5.9
1
0
0
0
0
0
0
0
0
0
0
0.014085
0.1125
80
5
24
16
0.816901
0.875
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
eae47abfa49e65c12475aa70a26227dcedfcaed6
42
py
Python
keepercommander/plugins/mysql/__init__.py
Mkn-yskz/Commandy
e360306f41112534ae71102658f560fd974a1f45
[ "MIT" ]
151
2015-11-02T02:04:46.000Z
2022-01-20T00:07:01.000Z
keepercommander/plugins/mysql/__init__.py
Mkn-yskz/Commandy
e360306f41112534ae71102658f560fd974a1f45
[ "MIT" ]
145
2015-12-31T00:11:35.000Z
2022-03-31T19:13:54.000Z
keepercommander/plugins/mysql/__init__.py
Mkn-yskz/Commandy
e360306f41112534ae71102658f560fd974a1f45
[ "MIT" ]
73
2015-10-30T00:53:10.000Z
2022-03-30T03:50:53.000Z
from .mysql import * __all__ = ["rotate"]
14
20
0.666667
5
42
4.8
1
0
0
0
0
0
0
0
0
0
0
0
0.166667
42
3
21
14
0.685714
0
0
0
0
0
0.139535
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
eaf89cec2ae477059dad21983ca039597501a6d7
1,180
py
Python
Trakttv.bundle/Contents/Libraries/Shared/stash/archives/core/base.py
disrupted/Trakttv.bundle
24712216c71f3b22fd58cb5dd89dad5bb798ed60
[ "RSA-MD" ]
1,346
2015-01-01T14:52:24.000Z
2022-03-28T12:50:48.000Z
Trakttv.bundle/Contents/Libraries/Shared/stash/archives/core/base.py
alcroito/Plex-Trakt-Scrobbler
4f83fb0860dcb91f860d7c11bc7df568913c82a6
[ "RSA-MD" ]
474
2015-01-01T10:27:46.000Z
2022-03-21T12:26:16.000Z
Trakttv.bundle/Contents/Libraries/Shared/stash/archives/core/base.py
alcroito/Plex-Trakt-Scrobbler
4f83fb0860dcb91f860d7c11bc7df568913c82a6
[ "RSA-MD" ]
191
2015-01-02T18:27:22.000Z
2022-03-29T10:49:48.000Z
from stash.core.modules.base import MappingModule import collections class Archive(MappingModule): __group__ = 'archive' @property def serializer(self): return self.stash.serializer def dumps(self, value): return self.serializer.dumps(value) def loads(self, value): return self.serializer.loads(value) def save(self): raise NotImplementedError def delete(self, keys): if not keys: return if not isinstance(keys, collections.Iterable): keys = [keys] for key in keys: del self[key] def get_items(self, keys=None): if keys is None: return self.iteritems() return [(key, self[key]) for key in keys] def set_items(self, items): for key, value in items: self[key] = value def __delitem__(self, key): raise NotImplementedError def __getitem__(self, key): raise NotImplementedError def __iter__(self): raise NotImplementedError def __len__(self): raise NotImplementedError def __setitem__(self, key, value): raise NotImplementedError
21.071429
54
0.619492
131
1,180
5.381679
0.328244
0.204255
0.191489
0.131915
0.178723
0
0
0
0
0
0
0
0.300847
1,180
55
55
21.454545
0.854545
0
0
0.162162
0
0
0.005932
0
0
0
0
0
0
1
0.324324
false
0
0.054054
0.081081
0.594595
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
dc4a1a1b3a0387051b938aefae9a958e03daeb78
64,982
py
Python
pysnmp_mibs/DISMAN-SCRIPT-MIB.py
jackjack821/pysnmp-mibs
9835ea0bb2420715caf4ee9aaa07d59bb263acd6
[ "BSD-2-Clause" ]
6
2017-04-21T13:48:08.000Z
2022-01-06T19:42:52.000Z
pysnmp_mibs/DISMAN-SCRIPT-MIB.py
jackjack821/pysnmp-mibs
9835ea0bb2420715caf4ee9aaa07d59bb263acd6
[ "BSD-2-Clause" ]
1
2020-05-05T16:42:25.000Z
2020-05-05T16:42:25.000Z
pysnmp_mibs/DISMAN-SCRIPT-MIB.py
jackjack821/pysnmp-mibs
9835ea0bb2420715caf4ee9aaa07d59bb263acd6
[ "BSD-2-Clause" ]
6
2020-02-08T20:28:49.000Z
2021-09-14T13:36:46.000Z
# # PySNMP MIB module DISMAN-SCRIPT-MIB (http://pysnmp.sf.net) # ASN.1 source http://mibs.snmplabs.com:80/asn1/DISMAN-SCRIPT-MIB # Produced by pysmi-0.0.7 at Sun Feb 14 00:08:15 2016 # On host bldfarm platform Linux version 4.1.13-100.fc21.x86_64 by user goose # Using Python version 3.5.0 (default, Jan 5 2016, 17:11:52) # ( OctetString, Integer, ObjectIdentifier, ) = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier") ( NamedValues, ) = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ( ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ) = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint") ( SnmpAdminString, ) = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString") ( NotificationGroup, ModuleCompliance, ObjectGroup, ) = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup") ( MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, MibIdentifier, ModuleIdentity, Integer32, Counter64, Unsigned32, ObjectIdentity, IpAddress, mib_2, Gauge32, TimeTicks, Bits, iso, NotificationType, ) = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "MibIdentifier", "ModuleIdentity", "Integer32", "Counter64", "Unsigned32", "ObjectIdentity", "IpAddress", "mib-2", "Gauge32", "TimeTicks", "Bits", "iso", "NotificationType") ( TimeInterval, StorageType, TextualConvention, DisplayString, RowStatus, DateAndTime, ) = mibBuilder.importSymbols("SNMPv2-TC", "TimeInterval", "StorageType", "TextualConvention", "DisplayString", "RowStatus", "DateAndTime") scriptMIB = ModuleIdentity((1, 3, 6, 1, 2, 1, 64)).setRevisions(("2001-08-21 00:00", "1999-02-22 18:00",)) if mibBuilder.loadTexts: scriptMIB.setLastUpdated('200108210000Z') if mibBuilder.loadTexts: 
scriptMIB.setOrganization('IETF Distributed Management Working Group') if mibBuilder.loadTexts: scriptMIB.setContactInfo('WG EMail: disman@dorothy.bmc.com\n Subscribe: disman-request@dorothy.bmc.com\n\n Chair: Randy Presuhn\n BMC Software, Inc.\n\n Postal: Office 1-3141\n 2141 North First Street\n San Jose, California 95131\n USA\n EMail: rpresuhn@bmc.com\n Phone: +1 408 546-1006\n\n Editor: David B. Levi\n Nortel Networks\n Postal: 4401 Great America Parkway\n Santa Clara, CA 95052-8185\n USA\n EMail: dlevi@nortelnetworks.com\n Phone: +1 423 686 0432\n\n Editor: Juergen Schoenwaelder\n TU Braunschweig\n Postal: Bueltenweg 74/75\n 38106 Braunschweig\n Germany\n EMail: schoenw@ibr.cs.tu-bs.de\n Phone: +49 531 391-3283') if mibBuilder.loadTexts: scriptMIB.setDescription('This MIB module defines a set of objects that allow to\n delegate management scripts to distributed managers.') smObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 64, 1)) smNotifications = MibIdentifier((1, 3, 6, 1, 2, 1, 64, 2)) smConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 64, 3)) smLangTable = MibTable((1, 3, 6, 1, 2, 1, 64, 1, 1), ) if mibBuilder.loadTexts: smLangTable.setDescription('This table lists supported script languages.') smLangEntry = MibTableRow((1, 3, 6, 1, 2, 1, 64, 1, 1, 1), ).setIndexNames((0, "DISMAN-SCRIPT-MIB", "smLangIndex")) if mibBuilder.loadTexts: smLangEntry.setDescription('An entry describing a particular language.') smLangIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647))) if mibBuilder.loadTexts: smLangIndex.setDescription("The locally arbitrary, but unique identifier associated\n with this language entry.\n\n The value is expected to remain constant at least from one\n re-initialization of the entity's network management system\n to the next re-initialization.\n\n Note that the data type and the range of this object must\n be consistent with the definition of smScriptLanguage.") smLangLanguage = 
MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 1, 1, 2), ObjectIdentifier()).setMaxAccess("readonly") if mibBuilder.loadTexts: smLangLanguage.setDescription('The globally unique identification of the language.') smLangVersion = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 1, 1, 3), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0,32))).setMaxAccess("readonly") if mibBuilder.loadTexts: smLangVersion.setDescription('The version number of the language. The zero-length string\n shall be used if the language does not have a version\n number.\n\n It is suggested that the version number consist of one or\n more decimal numbers separated by dots, where the first\n number is called the major version number.') smLangVendor = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 1, 1, 4), ObjectIdentifier()).setMaxAccess("readonly") if mibBuilder.loadTexts: smLangVendor.setDescription('An object identifier which identifies the vendor who\n provides the implementation of the language. This object\n identifier SHALL point to the object identifier directly\n below the enterprise object identifier {1 3 6 1 4 1}\n allocated for the vendor. 
The value must be the object\n identifier {0 0} if the vendor is not known.') smLangRevision = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 1, 1, 5), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0,32))).setMaxAccess("readonly") if mibBuilder.loadTexts: smLangRevision.setDescription('The version number of the language implementation.\n The value of this object must be an empty string if\n version number of the implementation is unknown.\n\n It is suggested that the value consist of one or more\n decimal numbers separated by dots, where the first\n number is called the major version number.') smLangDescr = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 1, 1, 6), SnmpAdminString()).setMaxAccess("readonly") if mibBuilder.loadTexts: smLangDescr.setDescription('A textual description of the language.') smExtsnTable = MibTable((1, 3, 6, 1, 2, 1, 64, 1, 2), ) if mibBuilder.loadTexts: smExtsnTable.setDescription('This table lists supported language extensions.') smExtsnEntry = MibTableRow((1, 3, 6, 1, 2, 1, 64, 1, 2, 1), ).setIndexNames((0, "DISMAN-SCRIPT-MIB", "smLangIndex"), (0, "DISMAN-SCRIPT-MIB", "smExtsnIndex")) if mibBuilder.loadTexts: smExtsnEntry.setDescription('An entry describing a particular language extension.') smExtsnIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647))) if mibBuilder.loadTexts: smExtsnIndex.setDescription("The locally arbitrary, but unique identifier associated\n with this language extension entry.\n\n The value is expected to remain constant at least from one\n re-initialization of the entity's network management system\n to the next re-initialization.") smExtsnExtension = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 2, 1, 2), ObjectIdentifier()).setMaxAccess("readonly") if mibBuilder.loadTexts: smExtsnExtension.setDescription('The globally unique identification of the language\n extension.') smExtsnVersion = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 2, 1, 3), 
SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0,32))).setMaxAccess("readonly") if mibBuilder.loadTexts: smExtsnVersion.setDescription('The version number of the language extension.\n It is suggested that the version number consist of one or\n more decimal numbers separated by dots, where the first\n number is called the major version number.') smExtsnVendor = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 2, 1, 4), ObjectIdentifier()).setMaxAccess("readonly") if mibBuilder.loadTexts: smExtsnVendor.setDescription('An object identifier which identifies the vendor who\n provides the implementation of the extension. The\n object identifier value should point to the OID node\n directly below the enterprise OID {1 3 6 1 4 1}\n allocated for the vendor. The value must by the object\n identifier {0 0} if the vendor is not known.') smExtsnRevision = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 2, 1, 5), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0,32))).setMaxAccess("readonly") if mibBuilder.loadTexts: smExtsnRevision.setDescription('The version number of the extension implementation.\n The value of this object must be an empty string if\n version number of the implementation is unknown.\n\n It is suggested that the value consist of one or more\n decimal numbers separated by dots, where the first\n number is called the major version number.') smExtsnDescr = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 2, 1, 6), SnmpAdminString()).setMaxAccess("readonly") if mibBuilder.loadTexts: smExtsnDescr.setDescription('A textual description of the language extension.') smScriptObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 64, 1, 3)) smScriptTable = MibTable((1, 3, 6, 1, 2, 1, 64, 1, 3, 1), ) if mibBuilder.loadTexts: smScriptTable.setDescription('This table lists and describes locally known scripts.') smScriptEntry = MibTableRow((1, 3, 6, 1, 2, 1, 64, 1, 3, 1, 1), ).setIndexNames((0, "DISMAN-SCRIPT-MIB", "smScriptOwner"), (0, "DISMAN-SCRIPT-MIB", "smScriptName")) if 
mibBuilder.loadTexts: smScriptEntry.setDescription('An entry describing a particular script. Every script that\n is stored in non-volatile memory is required to appear in\n this script table.') smScriptOwner = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 3, 1, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0,32))) if mibBuilder.loadTexts: smScriptOwner.setDescription('The manager who owns this row in the smScriptTable.') smScriptName = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 3, 1, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1,32))) if mibBuilder.loadTexts: smScriptName.setDescription('The locally-unique, administratively assigned name for this\n script. This object allows an smScriptOwner to have multiple\n entries in the smScriptTable.\n\n This value of this object may be used to derive the name\n (e.g. a file name) which is used by the Script MIB\n implementation to access the script in non-volatile\n storage. The details of this mapping are implementation\n specific. 
However, the mapping needs to ensure that scripts\n created by different owners with the same script name do not\n map to the same name in non-volatile storage.') smScriptDescr = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 3, 1, 1, 3), SnmpAdminString()).setMaxAccess("readcreate") if mibBuilder.loadTexts: smScriptDescr.setDescription('A description of the purpose of the script.') smScriptLanguage = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0,2147483647))).setMaxAccess("readcreate") if mibBuilder.loadTexts: smScriptLanguage.setDescription("The value of this object type identifies an entry in the\n smLangTable which is used to execute this script.\n The special value 0 may be used by hard-wired scripts\n that can not be modified and that are executed by\n internal functions.\n\n Set requests to change this object are invalid if the\n value of smScriptOperStatus is `enabled' or `compiling'\n and will result in an inconsistentValue error.\n\n Note that the data type and the range of this object must\n be consistent with the definition of smLangIndex.") smScriptSource = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 3, 1, 1, 5), DisplayString().clone(hexValue="")).setMaxAccess("readcreate") if mibBuilder.loadTexts: smScriptSource.setDescription("This object either contains a reference to the script\n source or an empty string. A reference must be given\n in the form of a Uniform Resource Locator (URL) as\n defined in RFC 2396. The allowed character sets and the\n encoding rules defined in RFC 2396 section 2 apply.\n\n When the smScriptAdminStatus object is set to `enabled',\n the Script MIB implementation will `pull' the script\n source from the URL contained in this object if the URL\n is not empty.\n\n An empty URL indicates that the script source is loaded\n from local storage. The script is read from the smCodeTable\n if the value of smScriptStorageType is volatile. 
Otherwise,\n the script is read from non-volatile storage.\n\n Note: This document does not mandate implementation of any\n specific URL scheme. An attempt to load a script from a\n nonsupported URL scheme will cause the smScriptOperStatus\n to report an `unknownProtocol' error.\n\n\n Set requests to change this object are invalid if the\n value of smScriptOperStatus is `enabled', `editing',\n `retrieving' or `compiling' and will result in an\n inconsistentValue error.") smScriptAdminStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3,))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("editing", 3),)).clone('disabled')).setMaxAccess("readcreate") if mibBuilder.loadTexts: smScriptAdminStatus.setDescription("The value of this object indicates the desired status of\n the script. See the definition of smScriptOperStatus for\n a description of the values.\n\n When the smScriptAdminStatus object is set to `enabled' and\n the smScriptOperStatus is `disabled' or one of the error\n states, the Script MIB implementation will `pull' the script\n source from the URL contained in the smScriptSource object\n if the URL is not empty.") smScriptOperStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 3, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("editing", 3), ("retrieving", 4), ("compiling", 5), ("noSuchScript", 6), ("accessDenied", 7), ("wrongLanguage", 8), ("wrongVersion", 9), ("compilationFailed", 10), ("noResourcesLeft", 11), ("unknownProtocol", 12), ("protocolFailure", 13), ("genericError", 14),)).clone('disabled')).setMaxAccess("readonly") if mibBuilder.loadTexts: smScriptOperStatus.setDescription("The actual status of the script in the runtime system. 
The\n value of this object is only meaningful when the value of\n the smScriptRowStatus object is `active'.\n\n The smScriptOperStatus object may have the following values:\n\n - `enabled' indicates that the script is available and can\n be started by a launch table entry.\n\n - `disabled' indicates that the script can not be used.\n\n - `editing' indicates that the script can be modified in the\n smCodeTable.\n\n - `retrieving' indicates that the script is currently being\n loaded from non-volatile storage or a remote system.\n\n - `compiling' indicates that the script is currently being\n compiled by the runtime system.\n\n - `noSuchScript' indicates that the script does not exist\n at the smScriptSource.\n\n - `accessDenied' indicates that the script can not be loaded\n from the smScriptSource due to a lack of permissions.\n\n - `wrongLanguage' indicates that the script can not be\n loaded from the smScriptSource because of a language\n mismatch.\n\n - `wrongVersion' indicates that the script can not be loaded\n from the smScriptSource because of a language version\n mismatch.\n\n - `compilationFailed' indicates that the compilation failed.\n\n - `noResourcesLeft' indicates that the runtime system does\n not have enough resources to load the script.\n\n - `unknownProtocol' indicates that the script could not be\n loaded from the smScriptSource because the requested\n protocol is not supported.\n\n - `protocolFailure' indicates that the script could not be\n loaded from the smScriptSource because of a protocol\n failure.\n\n - `genericError' indicates that the script could not be\n\n loaded due to an error condition not listed above.\n\n The `retrieving' and `compiling' states are transient states\n which will either lead to one of the error states or the\n `enabled' state. 
The `disabled' and `editing' states are\n administrative states which are only reached by explicit\n management operations.\n\n All launch table entries that refer to this script table\n entry shall have an smLaunchOperStatus value of `disabled'\n when the value of this object is not `enabled'.") smScriptStorageType = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 3, 1, 1, 8), StorageType().clone('volatile')).setMaxAccess("readcreate") if mibBuilder.loadTexts: smScriptStorageType.setDescription("This object defines whether this row and the script\n controlled by this row are kept in volatile storage and\n lost upon reboot or if this row is backed up by\n non-volatile or permanent storage.\n\n The storage type of this row always complies with the value\n of this entry if the value of the corresponding RowStatus\n object is `active'.\n\n However, the storage type of the script controlled by this\n row may be different, if the value of this entry is\n `non-volatile'. The script controlled by this row is written\n into local non-volatile storage if the following condition\n becomes true:\n\n (a) the URL contained in the smScriptSource object is empty\n and\n (b) the smScriptStorageType is `nonVolatile'\n and\n (c) the smScriptOperStatus is `enabled'\n\n Setting this object to `volatile' removes a script from\n non-volatile storage if the script controlled by this row\n has been in non-volatile storage before. 
Attempts to set\n this object to permanent will always fail with an\n inconsistentValue error.\n\n The value of smScriptStorageType is only meaningful if the\n value of the corresponding RowStatus object is `active'.\n\n If smScriptStorageType has the value permanent(4), then all\n objects whose MAX-ACCESS value is read-create must be\n writable, with the exception of the smScriptStorageType and\n smScriptRowStatus objects, which shall be read-only.") smScriptRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 3, 1, 1, 9), RowStatus()).setMaxAccess("readcreate") if mibBuilder.loadTexts: smScriptRowStatus.setDescription("A control that allows entries to be added and removed from\n this table.\n\n Changing the smScriptRowStatus from `active' to\n `notInService' will remove the associated script from the\n runtime system.\n\n Deleting conceptual rows from this table may affect the\n deletion of other resources associated with this row. For\n example, a script stored in non-volatile storage may be\n removed from non-volatile storage.\n\n An entry may not exist in the `active' state unless all\n required objects in the entry have appropriate values. 
Rows\n that are not complete or not in service are not known by the\n script runtime system.\n\n Attempts to `destroy' a row or to set a row `notInService'\n while the smScriptOperStatus is `enabled' will result in an\n inconsistentValue error.\n\n Attempts to `destroy' a row or to set a row `notInService'\n where the value of the smScriptStorageType object is\n `permanent' or `readOnly' will result in an\n inconsistentValue error.\n\n The value of this object has no effect on whether other\n objects in this conceptual row can be modified.") smScriptError = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 3, 1, 1, 10), SnmpAdminString().clone(hexValue="")).setMaxAccess("readonly") if mibBuilder.loadTexts: smScriptError.setDescription("This object contains a descriptive error message if the\n\n transition into the operational status `enabled' failed.\n Implementations must reset the error message to a\n zero-length string when a new attempt to change the\n script status to `enabled' is started.") smScriptLastChange = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 3, 1, 1, 11), DateAndTime().clone(hexValue="0000000000000000")).setMaxAccess("readonly") if mibBuilder.loadTexts: smScriptLastChange.setDescription("The date and time when this script table entry was last\n modified. 
The value '0000000000000000'H is returned if\n the script table entry has not yet been modified.\n\n Note that the resetting of smScriptError is not considered\n a change of the script table entry.") smCodeTable = MibTable((1, 3, 6, 1, 2, 1, 64, 1, 3, 2), ) if mibBuilder.loadTexts: smCodeTable.setDescription('This table contains the script code for scripts that are\n written via SNMP write operations.') smCodeEntry = MibTableRow((1, 3, 6, 1, 2, 1, 64, 1, 3, 2, 1), ).setIndexNames((0, "DISMAN-SCRIPT-MIB", "smScriptOwner"), (0, "DISMAN-SCRIPT-MIB", "smScriptName"), (0, "DISMAN-SCRIPT-MIB", "smCodeIndex")) if mibBuilder.loadTexts: smCodeEntry.setDescription('An entry describing a particular fragment of a script.') smCodeIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 3, 2, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1,4294967295))) if mibBuilder.loadTexts: smCodeIndex.setDescription('The index value identifying this code fragment.') smCodeText = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 3, 2, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1,1024))).setMaxAccess("readcreate") if mibBuilder.loadTexts: smCodeText.setDescription('The code that makes up a fragment of a script. 
The format\n of this code fragment depends on the script language which\n is identified by the associated smScriptLanguage object.') smCodeRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 3, 2, 1, 3), RowStatus()).setMaxAccess("readcreate") if mibBuilder.loadTexts: smCodeRowStatus.setDescription('A control that allows entries to be added and removed from\n this table.\n\n The value of this object has no effect on whether other\n objects in this conceptual row can be modified.') smRunObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 64, 1, 4)) smLaunchTable = MibTable((1, 3, 6, 1, 2, 1, 64, 1, 4, 1), ) if mibBuilder.loadTexts: smLaunchTable.setDescription('This table lists and describes scripts that are ready\n to be executed together with their parameters.') smLaunchEntry = MibTableRow((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1), ).setIndexNames((0, "DISMAN-SCRIPT-MIB", "smLaunchOwner"), (0, "DISMAN-SCRIPT-MIB", "smLaunchName")) if mibBuilder.loadTexts: smLaunchEntry.setDescription('An entry describing a particular executable script.') smLaunchOwner = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0,32))) if mibBuilder.loadTexts: smLaunchOwner.setDescription('The manager who owns this row in the smLaunchTable. Every\n instance of a running script started from a particular entry\n in the smLaunchTable (i.e. entries in the smRunTable) will\n be owned by the same smLaunchOwner used to index the entry\n in the smLaunchTable. This owner is not necessarily the same\n as the owner of the script itself (smLaunchScriptOwner).') smLaunchName = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1,32))) if mibBuilder.loadTexts: smLaunchName.setDescription('The locally-unique, administratively assigned name for this\n launch table entry. This object allows an smLaunchOwner to\n have multiple entries in the smLaunchTable. 
The smLaunchName\n is an arbitrary name that must be different from any other\n smLaunchTable entries with the same smLaunchOwner but can be\n the same as other entries in the smLaunchTable with\n different smLaunchOwner values. Note that the value of\n smLaunchName is not related in any way to the name of the\n script being launched.') smLaunchScriptOwner = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 3), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0,32))).setMaxAccess("readcreate") if mibBuilder.loadTexts: smLaunchScriptOwner.setDescription("The value of this object in combination with the value of\n smLaunchScriptName identifies the script that can be\n launched from this smLaunchTable entry. Attempts to write\n this object will fail with an inconsistentValue error if\n the value of smLaunchOperStatus is `enabled'.") smLaunchScriptName = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 4), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0,32)).clone(hexValue="")).setMaxAccess("readcreate") if mibBuilder.loadTexts: smLaunchScriptName.setDescription("The value of this object in combination with the value of\n the smLaunchScriptOwner identifies the script that can be\n launched from this smLaunchTable entry. The zero-length\n string may be used to point to a non-existing script.\n\n Attempts to write this object will fail with an\n inconsistentValue error if the value of smLaunchOperStatus\n is `enabled'.") smLaunchArgument = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 5), OctetString().clone(hexValue="")).setMaxAccess("readcreate") if mibBuilder.loadTexts: smLaunchArgument.setDescription('The argument supplied to the script. 
When a script is\n invoked, the value of this object is used to initialize\n the smRunArgument object.') smLaunchMaxRunning = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1,4294967295)).clone(1)).setMaxAccess("readcreate") if mibBuilder.loadTexts: smLaunchMaxRunning.setDescription('The maximum number of concurrently running scripts that may\n be invoked from this entry in the smLaunchTable. Lowering\n the current value of this object does not affect any scripts\n that are already executing.') smLaunchMaxCompleted = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1,4294967295)).clone(1)).setMaxAccess("readcreate") if mibBuilder.loadTexts: smLaunchMaxCompleted.setDescription('The maximum number of finished scripts invoked from this\n entry in the smLaunchTable allowed to be retained in the\n smRunTable. Whenever the value of this object is changed\n and whenever a script terminates, entries in the smRunTable\n are deleted if necessary until the number of completed\n scripts is smaller than the value of this object. Scripts\n whose smRunEndTime value indicates the oldest completion\n time are deleted first.') smLaunchLifeTime = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 8), TimeInterval().clone(360000)).setUnits('centi-seconds').setMaxAccess("readcreate") if mibBuilder.loadTexts: smLaunchLifeTime.setDescription('The default maximum amount of time a script launched\n from this entry may run. The value of this object is used\n to initialize the smRunLifeTime object when a script is\n launched. 
Changing the value of an smLaunchLifeTime\n instance does not affect scripts previously launched from\n\n this entry.') smLaunchExpireTime = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 9), TimeInterval().clone(360000)).setUnits('centi-seconds').setMaxAccess("readcreate") if mibBuilder.loadTexts: smLaunchExpireTime.setDescription('The default maximum amount of time information about a\n script launched from this entry is kept in the smRunTable\n after the script has completed execution. The value of\n this object is used to initialize the smRunExpireTime\n object when a script is launched. Changing the value of an\n smLaunchExpireTime instance does not affect scripts\n previously launched from this entry.') smLaunchStart = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0,2147483647))).setMaxAccess("readcreate") if mibBuilder.loadTexts: smLaunchStart.setDescription("This object is used to start the execution of scripts.\n When retrieved, the value will be the value of smRunIndex\n for the last script that started execution by manipulating\n this object. The value will be zero if no script started\n execution yet.\n\n A script is started by setting this object to an unused\n smRunIndex value. A new row in the smRunTable will be\n created which is indexed by the value supplied by the\n set-request in addition to the value of smLaunchOwner and\n smLaunchName. An unused value can be obtained by reading\n the smLaunchRunIndexNext object.\n\n Setting this object to the special value 0 will start\n the script with a self-generated smRunIndex value. The\n consequence is that the script invoker has no reliable\n way to determine the smRunIndex value for this script\n invocation and that the invoker has therefore no way\n to obtain the results from this script invocation. 
The\n special value 0 is however useful for scheduled script\n invocations.\n\n If this object is set, the following checks must be\n\n performed:\n\n 1) The value of the smLaunchOperStatus object in this\n entry of the smLaunchTable must be `enabled'.\n 2) The values of smLaunchScriptOwner and\n smLaunchScriptName of this row must identify an\n existing entry in the smScriptTable.\n 3) The value of smScriptOperStatus of this entry must\n be `enabled'.\n 4) The principal performing the set operation must have\n read access to the script. This must be checked by\n calling the isAccessAllowed abstract service interface\n defined in RFC 2271 on the row in the smScriptTable\n identified by smLaunchScriptOwner and smLaunchScriptName.\n The isAccessAllowed abstract service interface must be\n called on all columnar objects in the smScriptTable with\n a MAX-ACCESS value different than `not-accessible'. The\n test fails as soon as a call indicates that access is\n not allowed.\n 5) If the value provided by the set operation is not 0,\n a check must be made that the value is currently not\n in use. Otherwise, if the value provided by the set\n operation is 0, a suitable unused value must be\n generated.\n 6) The number of currently executing scripts invoked\n from this smLaunchTable entry must be less than\n smLaunchMaxRunning.\n\n Attempts to start a script will fail with an\n inconsistentValue error if one of the checks described\n above fails.\n\n Otherwise, if all checks have been passed, a new entry\n in the smRunTable will be created indexed by smLaunchOwner,\n smLaunchName and the new value for smRunIndex. 
The value\n of smLaunchArgument will be copied into smRunArgument,\n the value of smLaunchLifeTime will be copied to\n smRunLifeTime, and the value of smLaunchExpireTime\n will be copied to smRunExpireTime.\n\n The smRunStartTime will be set to the current time and\n the smRunState will be set to `initializing' before the\n script execution is initiated in the appropriate runtime\n system.\n\n Note that the data type and the range of this object must\n be consistent with the smRunIndex object. Since this\n object might be written from the scheduling MIB, the\n\n data type Integer32 rather than Unsigned32 is used.") smLaunchControl = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4,))).clone(namedValues=NamedValues(("abort", 1), ("suspend", 2), ("resume", 3), ("nop", 4),)).clone('nop')).setMaxAccess("readcreate") if mibBuilder.loadTexts: smLaunchControl.setDescription("This object is used to request a state change for all\n running scripts in the smRunTable that were started from\n this row in the smLaunchTable.\n\n Setting this object to abort(1), suspend(2) or resume(3)\n will set the smRunControl object of all applicable rows\n in the smRunTable to abort(1), suspend(2) or resume(3)\n respectively. The phrase `applicable rows' means the set of\n rows which were created from this entry in the smLaunchTable\n and whose value of smRunState allows the corresponding\n state change as described in the definition of the\n smRunControl object. Setting this object to nop(4) has no\n effect.\n\n Attempts to set this object lead to an inconsistentValue\n error only if all implicated sets on all the applicable\n rows lead to inconsistentValue errors. 
It is not allowed\n to return an inconsistentValue error if at least one state\n change on one of the applicable rows was successful.") smLaunchAdminStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3,))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("autostart", 3),)).clone('disabled')).setMaxAccess("readcreate") if mibBuilder.loadTexts: smLaunchAdminStatus.setDescription("The value of this object indicates the desired status of\n this launch table entry. The values enabled(1) and\n autostart(3) both indicate that the launch table entry\n\n should transition into the operational enabled(1) state as\n soon as the associated script table entry is enabled(1).\n\n The value autostart(3) further indicates that the script\n is started automatically by conceptually writing the\n value 0 into the associated smLaunchStart object during\n the transition from the `disabled' into the `enabled'\n operational state. This is useful for scripts that are\n to be launched on system start-up.") smLaunchOperStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3,))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("expired", 3),)).clone('disabled')).setMaxAccess("readonly") if mibBuilder.loadTexts: smLaunchOperStatus.setDescription("The value of this object indicates the actual status of\n this launch table entry. 
The smLaunchOperStatus object\n may have the following values:\n\n - `enabled' indicates that the launch table entry is\n available and can be used to start scripts.\n\n - `disabled' indicates that the launch table entry can\n not be used to start scripts.\n\n - `expired' indicates that the launch table entry can\n not be used to start scripts and will disappear as\n soon as all smRunTable entries associated with this\n launch table entry have disappeared.\n\n The value `enabled' requires that the smLaunchRowStatus\n object is active. The value `disabled' requires that there\n are no entries in the smRunTable associated with this\n smLaunchTable entry.") smLaunchRunIndexNext = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647))).setMaxAccess("readonly") if mibBuilder.loadTexts: smLaunchRunIndexNext.setDescription('This variable is used for creating rows in the smRunTable.\n The value of this variable is a currently unused value\n for smRunIndex, which can be written into the smLaunchStart\n object associated with this row to launch a script.\n\n The value returned when reading this variable must be unique\n for the smLaunchOwner and smLaunchName associated with this\n row. 
Subsequent attempts to read this variable must return\n different values.\n\n This variable will return the special value 0 if no new rows\n can be created.\n\n Note that the data type and the range of this object must be\n consistent with the definition of smRunIndex.') smLaunchStorageType = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 15), StorageType().clone('volatile')).setMaxAccess("readcreate") if mibBuilder.loadTexts: smLaunchStorageType.setDescription('This object defines if this row is kept in volatile storage\n and lost upon reboot or if this row is backed up by stable\n storage.\n\n The value of smLaunchStorageType is only meaningful if the\n value of the corresponding RowStatus object is active.\n\n If smLaunchStorageType has the value permanent(4), then all\n objects whose MAX-ACCESS value is read-create must be\n writable, with the exception of the smLaunchStorageType and\n smLaunchRowStatus objects, which shall be read-only.') smLaunchRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 16), RowStatus()).setMaxAccess("readcreate") if mibBuilder.loadTexts: smLaunchRowStatus.setDescription("A control that allows entries to be added and removed from\n this table.\n\n Attempts to `destroy' a row or to set a row `notInService'\n while the smLaunchOperStatus is `enabled' will result in\n an inconsistentValue error.\n\n\n Attempts to `destroy' a row or to set a row `notInService'\n where the value of the smLaunchStorageType object is\n `permanent' or `readOnly' will result in an\n inconsistentValue error.\n\n The value of this object has no effect on whether other\n objects in this conceptual row can be modified.") smLaunchError = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 17), SnmpAdminString().clone(hexValue="")).setMaxAccess("readonly") if mibBuilder.loadTexts: smLaunchError.setDescription('This object contains a descriptive error message if an\n attempt to launch a script fails. 
Implementations must reset\n the error message to a zero-length string when a new attempt\n to launch a script is started.') smLaunchLastChange = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 18), DateAndTime().clone(hexValue="0000000000000000")).setMaxAccess("readonly") if mibBuilder.loadTexts: smLaunchLastChange.setDescription("The date and time when this launch table entry was last\n modified. The value '0000000000000000'H is returned if\n the launch table entry has not yet been modified.\n\n Note that a change of smLaunchStart, smLaunchControl,\n smLaunchRunIndexNext, smLaunchRowExpireTime, or the\n resetting of smLaunchError is not considered a change\n of this launch table entry.") smLaunchRowExpireTime = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 1, 1, 19), TimeInterval().clone(2147483647)).setUnits('centi-seconds').setMaxAccess("readcreate") if mibBuilder.loadTexts: smLaunchRowExpireTime.setDescription("The value of this object specifies how long this row remains\n in the `enabled' or `disabled' operational state. The value\n reported by this object ticks backwards. When the value\n reaches 0, it stops ticking backward and the row is\n deleted if there are no smRunTable entries associated with\n\n this smLaunchTable entry. Otherwise, the smLaunchOperStatus\n changes to `expired' and the row deletion is deferred\n until there are no smRunTable entries associated with this\n smLaunchTable entry.\n\n The smLaunchRowExpireTime will not tick backwards if it is\n set to its maximum value (2147483647). In other words,\n setting this object to its maximum value turns the timer\n off.\n\n The value of this object may be set in order to increase\n or reduce the remaining time that the launch table entry\n may be used. Setting the value to 0 will cause an immediate\n row deletion or transition into the `expired' operational\n state.\n\n It is not possible to set this object while the operational\n status is `expired'. 
Attempts to modify this object while\n the operational status is `expired' leads to an\n inconsistentValue error.\n\n Note that the timer ticks backwards independent of the\n operational state of the launch table entry.") smRunTable = MibTable((1, 3, 6, 1, 2, 1, 64, 1, 4, 2), ) if mibBuilder.loadTexts: smRunTable.setDescription('This table lists and describes scripts that are currently\n running or have been running in the past.') smRunEntry = MibTableRow((1, 3, 6, 1, 2, 1, 64, 1, 4, 2, 1), ).setIndexNames((0, "DISMAN-SCRIPT-MIB", "smLaunchOwner"), (0, "DISMAN-SCRIPT-MIB", "smLaunchName"), (0, "DISMAN-SCRIPT-MIB", "smRunIndex")) if mibBuilder.loadTexts: smRunEntry.setDescription('An entry describing a particular running or finished\n script.') smRunIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647))) if mibBuilder.loadTexts: smRunIndex.setDescription('The locally arbitrary, but unique identifier associated\n with this running or finished script. This value must be\n unique for all rows in the smRunTable with the same\n smLaunchOwner and smLaunchName.\n\n Note that the data type and the range of this object must\n be consistent with the definition of smLaunchRunIndexNext\n and smLaunchStart.') smRunArgument = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 2, 1, 2), OctetString().clone(hexValue="")).setMaxAccess("readonly") if mibBuilder.loadTexts: smRunArgument.setDescription('The argument supplied to the script when it started.') smRunStartTime = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 2, 1, 3), DateAndTime().clone(hexValue="0000000000000000")).setMaxAccess("readonly") if mibBuilder.loadTexts: smRunStartTime.setDescription("The date and time when the execution started. 
The value\n '0000000000000000'H is returned if the script has not\n started yet.") smRunEndTime = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 2, 1, 4), DateAndTime().clone(hexValue="0000000000000000")).setMaxAccess("readonly") if mibBuilder.loadTexts: smRunEndTime.setDescription("The date and time when the execution terminated. The value\n '0000000000000000'H is returned if the script has not\n terminated yet.") smRunLifeTime = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 2, 1, 5), TimeInterval()).setUnits('centi-seconds').setMaxAccess("readwrite") if mibBuilder.loadTexts: smRunLifeTime.setDescription("This object specifies how long the script can execute.\n This object returns the remaining time that the script\n may run. The object is initialized with the value of the\n associated smLaunchLifeTime object and ticks backwards.\n The script is aborted immediately when the value reaches 0.\n\n The value of this object may be set in order to increase or\n reduce the remaining time that the script may run. Setting\n this value to 0 will abort script execution immediately,\n and, if the value of smRunExpireTime is also 0, will remove\n this entry from the smRunTable once it has terminated.\n\n If smRunLifeTime is set to its maximum value (2147483647),\n either by a set operation or by its initialization from the\n smLaunchLifeTime object, then it will not tick backwards.\n A running script with a maximum smRunLifeTime value will\n thus never be terminated with a `lifeTimeExceeded' exit\n code.\n\n The value of smRunLifeTime reflects the real-time execution\n time as seen by the outside world. The value of this object\n will always be 0 for a script that finished execution, that\n is smRunState has the value `terminated'.\n\n The value of smRunLifeTime does not change while a script\n is suspended, that is smRunState has the value `suspended'.\n Note that this does not affect set operations. 
It is legal\n to modify smRunLifeTime via set operations while a script\n is suspended.") smRunExpireTime = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 2, 1, 6), TimeInterval()).setUnits('centi-seconds').setMaxAccess("readwrite") if mibBuilder.loadTexts: smRunExpireTime.setDescription("The value of this object specifies how long this row can\n exist in the smRunTable after the script has terminated.\n This object returns the remaining time that the row may\n exist before it is aged out. The object is initialized with\n the value of the associated smLaunchExpireTime object and\n ticks backwards. The entry in the smRunTable is destroyed\n when the value reaches 0 and the smRunState has the value\n `terminated'.\n\n The value of this object may be set in order to increase or\n reduce the remaining time that the row may exist. Setting\n the value to 0 will destroy this entry as soon as the\n smRunState has the value `terminated'.") smRunExitCode = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9,))).clone(namedValues=NamedValues(("noError", 1), ("halted", 2), ("lifeTimeExceeded", 3), ("noResourcesLeft", 4), ("languageError", 5), ("runtimeError", 6), ("invalidArgument", 7), ("securityViolation", 8), ("genericError", 9),)).clone('noError')).setMaxAccess("readonly") if mibBuilder.loadTexts: smRunExitCode.setDescription("The value of this object indicates the reason why a\n script finished execution. The smRunExitCode code may have\n one of the following values:\n\n - `noError', which indicates that the script completed\n successfully without errors;\n\n - `halted', which indicates that the script was halted\n by a request from an authorized manager;\n\n - `lifeTimeExceeded', which indicates that the script\n exited because a time limit was exceeded;\n\n - `noResourcesLeft', which indicates that the script\n exited because it ran out of resources (e.g. 
memory);\n\n - `languageError', which indicates that the script exited\n because of a language error (e.g. a syntax error in an\n interpreted language);\n\n - `runtimeError', which indicates that the script exited\n due to a runtime error (e.g. a division by zero);\n\n - `invalidArgument', which indicates that the script could\n not be run because of invalid script arguments;\n\n - `securityViolation', which indicates that the script\n exited due to a security violation;\n\n - `genericError', which indicates that the script exited\n for an unspecified reason.\n\n If the script has not yet begun running, or is currently\n running, the value will be `noError'.") smRunResult = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 2, 1, 8), OctetString().clone(hexValue="")).setMaxAccess("readonly") if mibBuilder.loadTexts: smRunResult.setDescription('The result value produced by the running script. Note that\n the result may change while the script is executing.') smRunControl = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4,))).clone(namedValues=NamedValues(("abort", 1), ("suspend", 2), ("resume", 3), ("nop", 4),)).clone('nop')).setMaxAccess("readwrite") if mibBuilder.loadTexts: smRunControl.setDescription("The value of this object indicates the desired status of the\n script execution defined by this row.\n\n Setting this object to `abort' will abort execution if the\n\n value of smRunState is `initializing', `executing',\n `suspending', `suspended' or `resuming'. Setting this object\n to `abort' when the value of smRunState is `aborting' or\n `terminated', or if the implementation can determine that\n the attempt to abort the execution would fail, will result\n in an inconsistentValue error.\n\n Setting this object to `suspend' will suspend execution\n if the value of smRunState is `executing'. 
Setting this\n object to `suspend' will cause an inconsistentValue error\n if the value of smRunState is not `executing' or if the\n implementation can determine that the attempt to suspend\n the execution would fail.\n\n Setting this object to `resume' will resume execution\n if the value of smRunState is `suspending' or\n `suspended'. Setting this object to `resume' will cause an\n inconsistentValue error if the value of smRunState is\n not `suspended' or if the implementation can determine\n that the attempt to resume the execution would fail.\n\n Setting this object to nop(4) has no effect.") smRunState = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 2, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7,))).clone(namedValues=NamedValues(("initializing", 1), ("executing", 2), ("suspending", 3), ("suspended", 4), ("resuming", 5), ("aborting", 6), ("terminated", 7),))).setMaxAccess("readonly") if mibBuilder.loadTexts: smRunState.setDescription("The value of this object indicates the script's execution\n state. If the script has been invoked but has not yet\n begun execution, the value will be `initializing'. If the\n script is running, the value will be `executing'.\n\n A running script which received a request to suspend\n execution first transitions into a temporary `suspending'\n state. The temporary `suspending' state changes to\n `suspended' when the script has actually been suspended. The\n temporary `suspending' state changes back to `executing' if\n\n the attempt to suspend the running script fails.\n\n A suspended script which received a request to resume\n execution first transitions into a temporary `resuming'\n state. The temporary `resuming' state changes to `running'\n when the script has actually been resumed. 
The temporary\n `resuming' state changes back to `suspended' if the attempt\n to resume the suspended script fails.\n\n A script which received a request to abort execution but\n which is still running first transitions into a temporary\n `aborting' state.\n\n A script which has finished its execution is `terminated'.") smRunError = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 2, 1, 11), SnmpAdminString().clone(hexValue="")).setMaxAccess("readonly") if mibBuilder.loadTexts: smRunError.setDescription("This object contains a descriptive error message if the\n script startup or execution raised an abnormal condition.\n An implementation must store a descriptive error message\n in this object if the script exits with the smRunExitCode\n `genericError'.") smRunResultTime = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 2, 1, 12), DateAndTime().clone(hexValue="0000000000000000")).setMaxAccess("readonly") if mibBuilder.loadTexts: smRunResultTime.setDescription("The date and time when the smRunResult was last updated.\n The value '0000000000000000'H is returned if smRunResult\n has not yet been updated after the creation of this\n smRunTable entry.") smRunErrorTime = MibTableColumn((1, 3, 6, 1, 2, 1, 64, 1, 4, 2, 1, 13), DateAndTime().clone(hexValue="0000000000000000")).setMaxAccess("readonly") if mibBuilder.loadTexts: smRunErrorTime.setDescription("The date and time when the smRunError was last updated.\n The value '0000000000000000'H is returned if smRunError\n\n has not yet been updated after the creation of this\n smRunTable entry.") smTraps = MibIdentifier((1, 3, 6, 1, 2, 1, 64, 2, 0)) smScriptAbort = NotificationType((1, 3, 6, 1, 2, 1, 64, 2, 0, 1)).setObjects(*(("DISMAN-SCRIPT-MIB", "smRunExitCode"), ("DISMAN-SCRIPT-MIB", "smRunEndTime"), ("DISMAN-SCRIPT-MIB", "smRunError"),)) if mibBuilder.loadTexts: smScriptAbort.setDescription("This notification is generated whenever a running script\n terminates with an smRunExitCode unequal to `noError'.") smScriptResult = 
NotificationType((1, 3, 6, 1, 2, 1, 64, 2, 0, 2)).setObjects(*(("DISMAN-SCRIPT-MIB", "smRunResult"),)) if mibBuilder.loadTexts: smScriptResult.setDescription('This notification can be used by scripts to notify other\n management applications about results produced by the\n script.\n\n This notification is not automatically generated by the\n Script MIB implementation. It is the responsibility of\n the executing script to emit this notification where it\n is appropriate to do so.') smScriptException = NotificationType((1, 3, 6, 1, 2, 1, 64, 2, 0, 3)).setObjects(*(("DISMAN-SCRIPT-MIB", "smRunError"),)) if mibBuilder.loadTexts: smScriptException.setDescription('This notification can be used by scripts to notify other\n management applications about script errors.\n\n This notification is not automatically generated by the\n Script MIB implementation. It is the responsibility of\n the executing script or the runtime system to emit this\n notification where it is appropriate to do so.') smCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 64, 3, 1)) smGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 64, 3, 2)) smCompliance2 = ModuleCompliance((1, 3, 6, 1, 2, 1, 64, 3, 1, 2)).setObjects(*(("DISMAN-SCRIPT-MIB", "smLanguageGroup"), ("DISMAN-SCRIPT-MIB", "smScriptGroup2"), ("DISMAN-SCRIPT-MIB", "smLaunchGroup2"), ("DISMAN-SCRIPT-MIB", "smRunGroup2"), ("DISMAN-SCRIPT-MIB", "smNotificationsGroup2"), ("DISMAN-SCRIPT-MIB", "smCodeGroup"),)) if mibBuilder.loadTexts: smCompliance2.setDescription('The compliance statement for SNMP entities which implement\n the Script MIB.') smLanguageGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 64, 3, 2, 1)).setObjects(*(("DISMAN-SCRIPT-MIB", "smLangLanguage"), ("DISMAN-SCRIPT-MIB", "smLangVersion"), ("DISMAN-SCRIPT-MIB", "smLangVendor"), ("DISMAN-SCRIPT-MIB", "smLangRevision"), ("DISMAN-SCRIPT-MIB", "smLangDescr"), ("DISMAN-SCRIPT-MIB", "smExtsnExtension"), ("DISMAN-SCRIPT-MIB", "smExtsnVersion"), ("DISMAN-SCRIPT-MIB", "smExtsnVendor"), ("DISMAN-SCRIPT-MIB", 
"smExtsnRevision"), ("DISMAN-SCRIPT-MIB", "smExtsnDescr"),)) if mibBuilder.loadTexts: smLanguageGroup.setDescription('A collection of objects providing information about the\n capabilities of the scripting engine.') smScriptGroup2 = ObjectGroup((1, 3, 6, 1, 2, 1, 64, 3, 2, 7)).setObjects(*(("DISMAN-SCRIPT-MIB", "smScriptDescr"), ("DISMAN-SCRIPT-MIB", "smScriptLanguage"), ("DISMAN-SCRIPT-MIB", "smScriptSource"), ("DISMAN-SCRIPT-MIB", "smScriptAdminStatus"), ("DISMAN-SCRIPT-MIB", "smScriptOperStatus"), ("DISMAN-SCRIPT-MIB", "smScriptStorageType"), ("DISMAN-SCRIPT-MIB", "smScriptRowStatus"), ("DISMAN-SCRIPT-MIB", "smScriptError"), ("DISMAN-SCRIPT-MIB", "smScriptLastChange"),)) if mibBuilder.loadTexts: smScriptGroup2.setDescription('A collection of objects providing information about\n installed scripts.') smCodeGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 64, 3, 2, 3)).setObjects(*(("DISMAN-SCRIPT-MIB", "smCodeText"), ("DISMAN-SCRIPT-MIB", "smCodeRowStatus"),)) if mibBuilder.loadTexts: smCodeGroup.setDescription('A collection of objects used to download or modify scripts\n by using SNMP set requests.') smLaunchGroup2 = ObjectGroup((1, 3, 6, 1, 2, 1, 64, 3, 2, 8)).setObjects(*(("DISMAN-SCRIPT-MIB", "smLaunchScriptOwner"), ("DISMAN-SCRIPT-MIB", "smLaunchScriptName"), ("DISMAN-SCRIPT-MIB", "smLaunchArgument"), ("DISMAN-SCRIPT-MIB", "smLaunchMaxRunning"), ("DISMAN-SCRIPT-MIB", "smLaunchMaxCompleted"), ("DISMAN-SCRIPT-MIB", "smLaunchLifeTime"), ("DISMAN-SCRIPT-MIB", "smLaunchExpireTime"), ("DISMAN-SCRIPT-MIB", "smLaunchStart"), ("DISMAN-SCRIPT-MIB", "smLaunchControl"), ("DISMAN-SCRIPT-MIB", "smLaunchAdminStatus"), ("DISMAN-SCRIPT-MIB", "smLaunchOperStatus"), ("DISMAN-SCRIPT-MIB", "smLaunchRunIndexNext"), ("DISMAN-SCRIPT-MIB", "smLaunchStorageType"), ("DISMAN-SCRIPT-MIB", "smLaunchRowStatus"), ("DISMAN-SCRIPT-MIB", "smLaunchError"), ("DISMAN-SCRIPT-MIB", "smLaunchLastChange"), ("DISMAN-SCRIPT-MIB", "smLaunchRowExpireTime"),)) if mibBuilder.loadTexts: 
smLaunchGroup2.setDescription('A collection of objects providing information about scripts\n that can be launched.') smRunGroup2 = ObjectGroup((1, 3, 6, 1, 2, 1, 64, 3, 2, 9)).setObjects(*(("DISMAN-SCRIPT-MIB", "smRunArgument"), ("DISMAN-SCRIPT-MIB", "smRunStartTime"), ("DISMAN-SCRIPT-MIB", "smRunEndTime"), ("DISMAN-SCRIPT-MIB", "smRunLifeTime"), ("DISMAN-SCRIPT-MIB", "smRunExpireTime"), ("DISMAN-SCRIPT-MIB", "smRunExitCode"), ("DISMAN-SCRIPT-MIB", "smRunResult"), ("DISMAN-SCRIPT-MIB", "smRunState"), ("DISMAN-SCRIPT-MIB", "smRunControl"), ("DISMAN-SCRIPT-MIB", "smRunError"), ("DISMAN-SCRIPT-MIB", "smRunResultTime"), ("DISMAN-SCRIPT-MIB", "smRunErrorTime"),)) if mibBuilder.loadTexts: smRunGroup2.setDescription('A collection of objects providing information about running\n scripts.') smNotificationsGroup2 = NotificationGroup((1, 3, 6, 1, 2, 1, 64, 3, 2, 10)).setObjects(*(("DISMAN-SCRIPT-MIB", "smScriptAbort"), ("DISMAN-SCRIPT-MIB", "smScriptResult"), ("DISMAN-SCRIPT-MIB", "smScriptException"),)) if mibBuilder.loadTexts: smNotificationsGroup2.setDescription('The notifications emitted by the Script MIB.') smCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 64, 3, 1, 1)).setObjects(*(("DISMAN-SCRIPT-MIB", "smLanguageGroup"), ("DISMAN-SCRIPT-MIB", "smScriptGroup"), ("DISMAN-SCRIPT-MIB", "smLaunchGroup"), ("DISMAN-SCRIPT-MIB", "smRunGroup"), ("DISMAN-SCRIPT-MIB", "smCodeGroup"),)) if mibBuilder.loadTexts: smCompliance.setDescription('The compliance statement for SNMP entities which implement\n the Script MIB.') smScriptGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 64, 3, 2, 2)).setObjects(*(("DISMAN-SCRIPT-MIB", "smScriptDescr"), ("DISMAN-SCRIPT-MIB", "smScriptLanguage"), ("DISMAN-SCRIPT-MIB", "smScriptSource"), ("DISMAN-SCRIPT-MIB", "smScriptAdminStatus"), ("DISMAN-SCRIPT-MIB", "smScriptOperStatus"), ("DISMAN-SCRIPT-MIB", "smScriptStorageType"), ("DISMAN-SCRIPT-MIB", "smScriptRowStatus"),)) if mibBuilder.loadTexts: smScriptGroup.setDescription('A collection of objects providing 
information about\n installed scripts.') smLaunchGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 64, 3, 2, 4)).setObjects(*(("DISMAN-SCRIPT-MIB", "smLaunchScriptOwner"), ("DISMAN-SCRIPT-MIB", "smLaunchScriptName"), ("DISMAN-SCRIPT-MIB", "smLaunchArgument"), ("DISMAN-SCRIPT-MIB", "smLaunchMaxRunning"), ("DISMAN-SCRIPT-MIB", "smLaunchMaxCompleted"), ("DISMAN-SCRIPT-MIB", "smLaunchLifeTime"), ("DISMAN-SCRIPT-MIB", "smLaunchExpireTime"), ("DISMAN-SCRIPT-MIB", "smLaunchStart"), ("DISMAN-SCRIPT-MIB", "smLaunchControl"), ("DISMAN-SCRIPT-MIB", "smLaunchAdminStatus"), ("DISMAN-SCRIPT-MIB", "smLaunchOperStatus"), ("DISMAN-SCRIPT-MIB", "smLaunchRunIndexNext"), ("DISMAN-SCRIPT-MIB", "smLaunchStorageType"), ("DISMAN-SCRIPT-MIB", "smLaunchRowStatus"),)) if mibBuilder.loadTexts: smLaunchGroup.setDescription('A collection of objects providing information about scripts\n that can be launched.') smRunGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 64, 3, 2, 5)).setObjects(*(("DISMAN-SCRIPT-MIB", "smRunArgument"), ("DISMAN-SCRIPT-MIB", "smRunStartTime"), ("DISMAN-SCRIPT-MIB", "smRunEndTime"), ("DISMAN-SCRIPT-MIB", "smRunLifeTime"), ("DISMAN-SCRIPT-MIB", "smRunExpireTime"), ("DISMAN-SCRIPT-MIB", "smRunExitCode"), ("DISMAN-SCRIPT-MIB", "smRunResult"), ("DISMAN-SCRIPT-MIB", "smRunState"), ("DISMAN-SCRIPT-MIB", "smRunControl"), ("DISMAN-SCRIPT-MIB", "smRunError"),)) if mibBuilder.loadTexts: smRunGroup.setDescription('A collection of objects providing information about running\n scripts.') smNotificationsGroup = NotificationGroup((1, 3, 6, 1, 2, 1, 64, 3, 2, 6)).setObjects(*(("DISMAN-SCRIPT-MIB", "smScriptAbort"), ("DISMAN-SCRIPT-MIB", "smScriptResult"),)) if mibBuilder.loadTexts: smNotificationsGroup.setDescription('The notifications emitted by the Script MIB.') mibBuilder.exportSymbols("DISMAN-SCRIPT-MIB", smLaunchLifeTime=smLaunchLifeTime, smRunGroup=smRunGroup, smRunArgument=smRunArgument, smLaunchError=smLaunchError, PYSNMP_MODULE_ID=scriptMIB, smCodeGroup=smCodeGroup, smExtsnDescr=smExtsnDescr, 
smLaunchTable=smLaunchTable, smScriptStorageType=smScriptStorageType, smExtsnIndex=smExtsnIndex, smLaunchGroup=smLaunchGroup, smRunExitCode=smRunExitCode, smRunTable=smRunTable, smGroups=smGroups, smExtsnVersion=smExtsnVersion, smLaunchArgument=smLaunchArgument, smScriptResult=smScriptResult, smLaunchLastChange=smLaunchLastChange, smNotifications=smNotifications, smLangRevision=smLangRevision, smCodeText=smCodeText, smScriptOwner=smScriptOwner, smScriptGroup=smScriptGroup, smLangTable=smLangTable, smScriptRowStatus=smScriptRowStatus, smScriptName=smScriptName, smRunLifeTime=smRunLifeTime, smLaunchControl=smLaunchControl, smLaunchStorageType=smLaunchStorageType, smCodeIndex=smCodeIndex, smObjects=smObjects, smConformance=smConformance, smExtsnRevision=smExtsnRevision, smScriptDescr=smScriptDescr, smLangVersion=smLangVersion, smScriptError=smScriptError, smExtsnExtension=smExtsnExtension, smRunState=smRunState, smRunControl=smRunControl, smLaunchOperStatus=smLaunchOperStatus, smExtsnEntry=smExtsnEntry, smLaunchAdminStatus=smLaunchAdminStatus, smLangEntry=smLangEntry, smLaunchMaxCompleted=smLaunchMaxCompleted, smScriptLanguage=smScriptLanguage, smScriptGroup2=smScriptGroup2, smScriptOperStatus=smScriptOperStatus, smLangDescr=smLangDescr, smLaunchEntry=smLaunchEntry, smLaunchStart=smLaunchStart, smRunIndex=smRunIndex, smLaunchRowExpireTime=smLaunchRowExpireTime, smLaunchRunIndexNext=smLaunchRunIndexNext, smScriptLastChange=smScriptLastChange, smCompliances=smCompliances, smTraps=smTraps, smScriptException=smScriptException, smLaunchScriptName=smLaunchScriptName, smCodeEntry=smCodeEntry, smScriptSource=smScriptSource, smRunObjects=smRunObjects, scriptMIB=scriptMIB, smExtsnTable=smExtsnTable, smRunErrorTime=smRunErrorTime, smCompliance2=smCompliance2, smLangVendor=smLangVendor, smLanguageGroup=smLanguageGroup, smRunResultTime=smRunResultTime, smScriptAbort=smScriptAbort, smRunGroup2=smRunGroup2, smLangLanguage=smLangLanguage, smNotificationsGroup=smNotificationsGroup, 
smNotificationsGroup2=smNotificationsGroup2, smRunExpireTime=smRunExpireTime, smExtsnVendor=smExtsnVendor, smScriptObjects=smScriptObjects, smLaunchRowStatus=smLaunchRowStatus, smRunEndTime=smRunEndTime, smLaunchGroup2=smLaunchGroup2, smLaunchOwner=smLaunchOwner, smCodeRowStatus=smCodeRowStatus, smLangIndex=smLangIndex, smRunError=smRunError, smLaunchName=smLaunchName, smScriptAdminStatus=smScriptAdminStatus, smRunResult=smRunResult, smCompliance=smCompliance, smLaunchExpireTime=smLaunchExpireTime, smLaunchScriptOwner=smLaunchScriptOwner, smRunStartTime=smRunStartTime, smLaunchMaxRunning=smLaunchMaxRunning, smScriptTable=smScriptTable, smRunEntry=smRunEntry, smCodeTable=smCodeTable, smScriptEntry=smScriptEntry)
326.542714
3,784
0.717399
8,818
64,982
5.286233
0.099342
0.024521
0.037971
0.008238
0.50974
0.460977
0.418394
0.375831
0.329064
0.296306
0
0.036493
0.183174
64,982
198
3,785
328.191919
0.841708
0.004771
0
0
0
0.282723
0.666239
0.005707
0
0
0
0
0
1
0
false
0.005236
0.036649
0
0.036649
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
dc65276a5f4f878d7b99c63b40dd8b99f59398d7
254
py
Python
app/emend/__init__.py
tantalor/emend
c9fc890c737fd4a767f2212c45c33b61fc240c32
[ "MIT" ]
3
2015-01-01T10:11:43.000Z
2016-05-08T18:35:48.000Z
app/emend/__init__.py
tantalor/emend
c9fc890c737fd4a767f2212c45c33b61fc240c32
[ "MIT" ]
null
null
null
app/emend/__init__.py
tantalor/emend
c9fc890c737fd4a767f2212c45c33b61fc240c32
[ "MIT" ]
null
null
null
from request_handler import RequestHandler from suggest import suggest from bookmarklet import bookmarklet from site_name import site_name from canonical_url import canonical_url from model import Edit, Site, User import bitly import twitter import html
25.4
42
0.866142
37
254
5.810811
0.459459
0.074419
0
0
0
0
0
0
0
0
0
0
0.125984
254
9
43
28.222222
0.968468
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
dc7e3fe327c93c32854c444d9f469511fa5747b7
169
py
Python
BigPyD/Catalog.py
eicherjc/BigPyD
eca7bf9d315e0c2f2a9ef416b31de59d4f92a49c
[ "Apache-2.0" ]
null
null
null
BigPyD/Catalog.py
eicherjc/BigPyD
eca7bf9d315e0c2f2a9ef416b31de59d4f92a49c
[ "Apache-2.0" ]
null
null
null
BigPyD/Catalog.py
eicherjc/BigPyD
eca7bf9d315e0c2f2a9ef416b31de59d4f92a49c
[ "Apache-2.0" ]
null
null
null
def catalog(BidSess): r = BidSess.request("GET", url="https://localhost/api/v1/data-catalog/?format=json", headers=BidSess.headers, verify=False) return r.json()
56.333333
127
0.715976
24
169
5.041667
0.75
0
0
0
0
0
0
0
0
0
0
0.006579
0.100592
169
3
128
56.333333
0.789474
0
0
0
0
0
0.311765
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
dcaa4f98c16f0935f0ebe837deee5521ffa79c69
46
py
Python
combo/blm/core/__init__.py
yanpei18345156216/COMBO_Python3
666a116dfece71e6236291e89ea2ab4d6db0ead9
[ "MIT" ]
21
2019-07-04T17:30:27.000Z
2022-03-26T14:27:32.000Z
combo/blm/core/__init__.py
yanpei18345156216/COMBO_Python3
666a116dfece71e6236291e89ea2ab4d6db0ead9
[ "MIT" ]
2
2020-03-01T01:42:25.000Z
2020-03-01T02:59:37.000Z
combo/blm/core/__init__.py
yanpei18345156216/COMBO_Python3
666a116dfece71e6236291e89ea2ab4d6db0ead9
[ "MIT" ]
13
2019-08-07T14:08:04.000Z
2022-03-16T00:51:58.000Z
from .model import model __all__ = ["model"]
11.5
24
0.695652
6
46
4.666667
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.173913
46
3
25
15.333333
0.736842
0
0
0
0
0
0.108696
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
dcb7b4df721e80627f3f290a147b14cde91ce1d8
79
py
Python
crits/certificates/migrate.py
dicato/crits
8d500f7175855f1aeefa94caa783f981062ba869
[ "MIT" ]
null
null
null
crits/certificates/migrate.py
dicato/crits
8d500f7175855f1aeefa94caa783f981062ba869
[ "MIT" ]
null
null
null
crits/certificates/migrate.py
dicato/crits
8d500f7175855f1aeefa94caa783f981062ba869
[ "MIT" ]
null
null
null
def migrate_certificate(self): """ Latest migration. """ pass
11.285714
30
0.56962
7
79
6.285714
1
0
0
0
0
0
0
0
0
0
0
0
0.303797
79
6
31
13.166667
0.8
0.21519
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0.5
0
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
4
f4b30b59edb9cc8da793c1e87036e3de4bdfe927
81
py
Python
fly/apps.py
chunjie-sam-liu/LNCediting
24c1bbe5f03117da06d3d2fda492d4d5ad45c473
[ "MIT" ]
null
null
null
fly/apps.py
chunjie-sam-liu/LNCediting
24c1bbe5f03117da06d3d2fda492d4d5ad45c473
[ "MIT" ]
1
2020-04-14T11:33:29.000Z
2020-04-14T11:33:29.000Z
fly/apps.py
chunjie-sam-liu/LNCediting
24c1bbe5f03117da06d3d2fda492d4d5ad45c473
[ "MIT" ]
null
null
null
from django.apps import AppConfig class FlyConfig(AppConfig): name = 'fly'
13.5
33
0.728395
10
81
5.9
0.9
0
0
0
0
0
0
0
0
0
0
0
0.185185
81
5
34
16.2
0.893939
0
0
0
0
0
0.037037
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
f4d00e4ab369e6586905a43e5610efd3a4151644
446
py
Python
modules/modules.py
oboforty/metaindex
290d6b581fb1c074e28d42dc750ab878585e2eb2
[ "MIT" ]
null
null
null
modules/modules.py
oboforty/metaindex
290d6b581fb1c074e28d42dc750ab878585e2eb2
[ "MIT" ]
null
null
null
modules/modules.py
oboforty/metaindex
290d6b581fb1c074e28d42dc750ab878585e2eb2
[ "MIT" ]
null
null
null
from .eme_utils import __module__ as utils from .search import __module__ as search from .db_builder import __module__ as db_builder from .doors_oauth import __module__ as oauth from .admin import __module__ as admin from .comments import __module__ as comments # favourites modules = [ utils, oauth, # todo: unfinished eme modules search, comments, # favourites, db_builder, admin, ]
20.272727
49
0.695067
54
446
5.203704
0.314815
0.256228
0.298932
0
0
0
0
0
0
0
0
0
0.26009
446
21
50
21.238095
0.851515
0.123318
0
0
0
0
0
0
0
0
0
0.047619
0
1
0
false
0
0.428571
0
0.428571
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
1
0
0
0
0
4
f4d03ace57dd2eacb60bf87a63998ec923d499a2
91
py
Python
lewis_emulators/instron_stress_rig/interfaces/__init__.py
ISISComputingGroup/EPICS-DeviceEmulator
026c2a14a16bb204ea7527e3765daa182cafa814
[ "BSD-3-Clause" ]
2
2020-10-20T16:49:13.000Z
2021-02-19T10:41:44.000Z
lewis_emulators/instron_stress_rig/interfaces/__init__.py
ISISComputingGroup/EPICS-DeviceEmulator
026c2a14a16bb204ea7527e3765daa182cafa814
[ "BSD-3-Clause" ]
9
2019-03-22T15:35:15.000Z
2021-07-28T11:05:43.000Z
lewis_emulators/instron_stress_rig/interfaces/__init__.py
ISISComputingGroup/EPICS-DeviceEmulator
026c2a14a16bb204ea7527e3765daa182cafa814
[ "BSD-3-Clause" ]
1
2020-10-21T17:02:44.000Z
2020-10-21T17:02:44.000Z
from .stream_interface import InstronStreamInterface __all__ = ['InstronStreamInterface']
22.75
52
0.846154
7
91
10.285714
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.087912
91
3
53
30.333333
0.86747
0
0
0
0
0
0.241758
0.241758
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
7624b4c784f0bd50e72c5a9da56fb2bfa7034a44
161
py
Python
bricks_toolkit/menu_controller.py
Wilgnne/bricks_graphics
12cc2386d8e1dd0bbb4c49889a47630e8f3d1bd9
[ "MIT" ]
null
null
null
bricks_toolkit/menu_controller.py
Wilgnne/bricks_graphics
12cc2386d8e1dd0bbb4c49889a47630e8f3d1bd9
[ "MIT" ]
null
null
null
bricks_toolkit/menu_controller.py
Wilgnne/bricks_graphics
12cc2386d8e1dd0bbb4c49889a47630e8f3d1bd9
[ "MIT" ]
null
null
null
from screen_controller import BricksWall class Menu(object): """docstring for Menu""" def __init__(self, arg): super(Menu, self).__init__() self.arg = arg
23
40
0.726708
22
161
4.909091
0.681818
0.148148
0.203704
0
0
0
0
0
0
0
0
0
0.142857
161
7
41
23
0.782609
0.111801
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.2
0
0.6
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
52637a3f12bfd607f5b7c5feebcef588e8984fce
187
py
Python
code/7/stack_example.py
TeamLab/introduction_to_pythoy_TEAMLAB_MOOC
ebf1ff02d6a341bfee8695eac478ff8297cb97e4
[ "MIT" ]
65
2017-11-01T01:57:21.000Z
2022-02-08T13:36:25.000Z
code/7/stack_example.py
TeamLab/introduction_to_pythoy_TEAMLAB_MOOC
ebf1ff02d6a341bfee8695eac478ff8297cb97e4
[ "MIT" ]
9
2017-11-03T15:05:30.000Z
2018-05-17T03:18:36.000Z
code/7/stack_example.py
TeamLab/introduction_to_pythoy_TEAMLAB_MOOC
ebf1ff02d6a341bfee8695eac478ff8297cb97e4
[ "MIT" ]
64
2017-11-01T01:57:23.000Z
2022-01-19T03:52:12.000Z
word = input("Input a word: ") world_list = list(word) print(world_list) result = [] for _ in range(len(world_list)): result.append(world_list.pop()) print(result) print(word[::-1])
18.7
35
0.684492
29
187
4.241379
0.482759
0.292683
0.243902
0
0
0
0
0
0
0
0
0.006173
0.13369
187
9
36
20.777778
0.753086
0
0
0
0
0
0.074866
0
0
0
0
0
0
1
0
false
0
0
0
0
0.375
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
5266a9ceb1236bd28825acde939368713eb830d0
1,963
py
Python
src/encoded/types/data_release_update.py
4dn-dcic/fourfron
29601961706d2371b982e57ae085e8ebec3b2714
[ "MIT" ]
11
2016-11-23T02:33:13.000Z
2021-06-18T14:21:20.000Z
src/encoded/types/data_release_update.py
4dn-dcic/fourfron
29601961706d2371b982e57ae085e8ebec3b2714
[ "MIT" ]
1,159
2016-11-21T15:40:24.000Z
2022-03-29T03:18:38.000Z
src/encoded/types/data_release_update.py
4dn-dcic/fourfron
29601961706d2371b982e57ae085e8ebec3b2714
[ "MIT" ]
5
2017-01-27T16:36:15.000Z
2019-06-14T14:39:54.000Z
from snovault import ( collection, load_schema, ) from .base import ( Item ) @collection( name='data-release-updates', properties={ 'title': 'Data Release Updates', 'description': 'Metadata release updates for the Portal', }) class DataReleaseUpdate(Item): item_type = 'data_release_update' schema = load_schema('encoded:schemas/data_release_update.json') embedded_list = [ # ExperimentSetReplicate linkTo (accession) 'update_items.primary_id.status', 'update_items.primary_id.tags', 'update_items.primary_id.accession', # Experiment linkTo (accession) 'update_items.primary_id.experiments_in_set.status', 'update_items.primary_id.experiments_in_set.accession', # ExperimentType linkTo (title) 'update_items.primary_id.experiments_in_set.experiment_type.title', 'update_items.primary_id.experiments_in_set.experiment_type.display_title', # Experiment linkTo (accession) 'update_items.primary_id.experiments_in_set.accession', 'update_items.primary_id.experiments_in_set.experiment_categorizer', 'update_items.primary_id.experiments_in_set.files.status', 'update_items.primary_id.experiments_in_set.files.file_type', # Biosample linkTo 'update_items.primary_id.experiments_in_set.biosample.accession', 'update_items.primary_id.experiments_in_set.biosample.biosource_summary', # FileProcessed linkTo 'update_items.primary_id.experiments_in_set.processed_files.accession', 'update_items.primary_id.experiments_in_set.processed_files.status', 'update_items.primary_id.experiments_in_set.processed_files.file_type', # FileProcessed linkTo 'update_items.primary_id.processed_files.accession', 'update_items.primary_id.processed_files.status', 'update_items.primary_id.processed_files.file_type' ]
37.037736
83
0.72593
224
1,963
5.977679
0.223214
0.156087
0.255414
0.283794
0.681105
0.661688
0.586258
0.524272
0.324869
0.173264
0
0
0.181355
1,963
52
84
37.75
0.83323
0.096791
0
0.054054
0
0
0.673654
0.609065
0
0
0
0
0
1
0
false
0
0.054054
0
0.162162
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
52792a06f5f90f14c15cddfdf0b19e0b18d79086
201
py
Python
1043.py
heltonricardo/URI
160cca22d94aa667177c9ebf2a1c9864c5e55b41
[ "MIT" ]
6
2021-04-13T00:33:43.000Z
2022-02-10T10:23:59.000Z
1043.py
heltonricardo/URI
160cca22d94aa667177c9ebf2a1c9864c5e55b41
[ "MIT" ]
null
null
null
1043.py
heltonricardo/URI
160cca22d94aa667177c9ebf2a1c9864c5e55b41
[ "MIT" ]
3
2021-03-23T18:42:24.000Z
2022-02-10T10:24:07.000Z
A, B, C = [float(x) for x in input().split()] if (A + B > C) and (B + C > A) and (A + C > B): print('Perimetro = {:.1f}'.format(A + B + C)) else: print('Area = {:.1f}'.format(((A + B) * C) / 2.0))
40.2
56
0.462687
39
201
2.384615
0.487179
0.107527
0.129032
0.215054
0.236559
0
0
0
0
0
0
0.026144
0.238806
201
4
57
50.25
0.581699
0
0
0
0
0
0.154229
0
0
0
0
0
0
1
0
true
0
0
0
0
0.5
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
4
bfeb9d61bac6e746a93d55fb5cdf3b897a57287f
22
py
Python
morphops/_version.py
vishalbelsare/morphops
63f678c4762be957ca51bac0b92609d244ef18ce
[ "MIT" ]
22
2019-02-25T13:30:33.000Z
2022-03-14T00:15:57.000Z
deepblocks/version.py
blurry-mood/Deep-Learning-Blocks
e81cf9270c21189a90799671d9446af449e826be
[ "MIT" ]
12
2018-12-16T11:02:14.000Z
2021-10-12T11:25:15.000Z
deepblocks/version.py
blurry-mood/Deep-Learning-Blocks
e81cf9270c21189a90799671d9446af449e826be
[ "MIT" ]
3
2021-07-27T20:50:13.000Z
2021-09-30T17:16:32.000Z
__version__ = '0.1.13'
22
22
0.681818
4
22
2.75
1
0
0
0
0
0
0
0
0
0
0
0.2
0.090909
22
1
22
22
0.35
0
0
0
0
0
0.26087
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
bff43add0ea11b225fa7cfabcd889fadc8362c0b
5,046
py
Python
deepcvr/utils/config.py
john-james-ai/DeepCVR
d8c2f98ee4febf7b0a7131d1cf198cee02fcdb2e
[ "BSD-3-Clause" ]
null
null
null
deepcvr/utils/config.py
john-james-ai/DeepCVR
d8c2f98ee4febf7b0a7131d1cf198cee02fcdb2e
[ "BSD-3-Clause" ]
null
null
null
deepcvr/utils/config.py
john-james-ai/DeepCVR
d8c2f98ee4febf7b0a7131d1cf198cee02fcdb2e
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python3 # -*- coding:utf-8 -*- # ================================================================================================ # # Project : DeepCVR: Deep Learning for Conversion Rate Prediction # # Version : 0.1.0 # # File : /config.py # # Language : Python 3.10.2 # # ------------------------------------------------------------------------------------------------ # # Author : John James # # Email : john.james.ai.studio@gmail.com # # URL : https://github.com/john-james-ai/cvr # # ------------------------------------------------------------------------------------------------ # # Created : Monday, February 14th 2022, 1:25:40 pm # # Modified : Saturday, March 5th 2022, 2:58:58 pm # # Modifier : John James (john.james.ai.studio@gmail.com) # # ------------------------------------------------------------------------------------------------ # # License : BSD 3-clause "New" or "Revised" License # # Copyright: (c) 2022 Bryant St. Labs # # ================================================================================================ # from abc import ABC import os import yaml import yamlordereddictloader # ---------------------------------------------------------------------------- # class Config(ABC): """Abstract base class for Config classes.""" def load_config(self, filepath: str) -> dict: if os.path.exists(filepath): with open(filepath, "r") as f: return yaml.full_load(f) else: return {} def save_config(self, config: dict, filepath: str) -> None: os.makedirs(os.path.dirname(filepath), exist_ok=True) with open(filepath, "w") as f: yaml.dump(config, f) # ---------------------------------------------------------------------------- # def config_dag(filepath): if os.path.exists(filepath): with open(filepath, "r") as f: return yaml.load(f, Loader=yamlordereddictloader.Loader) else: return {} # ---------------------------------------------------------------------------- # class S3Config(Config): """Encapsulates the Amazon S3 Credentials and Configuration""" 
__filepath = "config/credentials.yaml" def __init__(self) -> None: super(S3Config, self).__init__() self._config = self.load_config(S3Config.__filepath)["amazon"] @property def bucket(self) -> str: return self._config["bucket"] @property def key(self) -> str: return self._config["aws_access_key_id"] @property def password(self) -> str: return self._config["aws_secret_access_key"] # ---------------------------------------------------------------------------- # class MySQLConfig(Config): """Encapsulates credentials for a MySQL user""" __filepath = "config/credentials.yaml" def __init__(self, user: str = "mysql") -> None: super(MySQLConfig, self).__init__() self._config = self.load_config(MySQLConfig.__filepath)[user] @property def host(self) -> str: return self._config["host"] @property def user(self) -> str: return self._config["user"] @property def password(self) -> str: return self._config["password"] @property def dbname(self) -> str: return self._config["dbname"] # ---------------------------------------------------------------------------- # class AirflowBackendConfig(Config): """Encapsulates the connection parameters for the Airflow backend database""" __filepath = "config/credentials.yaml" def __init__(self) -> None: super(AirflowBackendConfig, self).__init__() self._config = self.load_config(AirflowBackendConfig.__filepath)["airflow"] @property def user(self) -> str: return self._config["user"] @property def password(self) -> str: return self._config["password"] @property def host(self) -> str: return self._config["host"] @property def port(self) -> str: return self._config["port"] @property def dbname(self) -> str: return self._config["dbname"] @property def string(self) -> str: return self._config["string"]
36.042857
101
0.413397
399
5,046
5.062657
0.333333
0.084158
0.083663
0.109406
0.44505
0.410891
0.371782
0.304455
0.283663
0.19505
0
0.011487
0.32719
5,046
139
102
36.302158
0.583505
0.457392
0
0.573333
0
0
0.07425
0.035545
0
0
0
0
0
1
0.253333
false
0.066667
0.053333
0.173333
0.626667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
4
872336db0985a3880dd70eba194cbb9e11bef245
265
py
Python
dept.py
ebbieaden/pymodules
44ee374562f0bb685199b6534b3ca259a2cebe06
[ "MIT" ]
null
null
null
dept.py
ebbieaden/pymodules
44ee374562f0bb685199b6534b3ca259a2cebe06
[ "MIT" ]
null
null
null
dept.py
ebbieaden/pymodules
44ee374562f0bb685199b6534b3ca259a2cebe06
[ "MIT" ]
null
null
null
dept = { "Computer Science": 10, "Computer Engineering": 12 } depts = input("Department: ") if depts == "Computer_sci": print(dept["Computer_sci"]) elif depts == "Computer Engineering": print(dept["Computer Engineering"]) else: pass
22.083333
39
0.626415
28
265
5.857143
0.535714
0.219512
0.207317
0
0
0
0
0
0
0
0
0.019512
0.226415
265
12
40
22.083333
0.780488
0
0
0
0
0
0.421053
0
0
0
0
0
0
1
0
false
0.090909
0
0
0
0.181818
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
4
8738b8134122aafd306b5e882c415f5036ce4d47
14,089
py
Python
scripts/data_preparation/create_lmdb.py
NimrodShabtay/KAIR
d744959d75c22b016543dc65c04922ea4b59894f
[ "MIT" ]
1
2022-02-27T03:05:50.000Z
2022-02-27T03:05:50.000Z
scripts/data_preparation/create_lmdb.py
NimrodShabtay/KAIR
d744959d75c22b016543dc65c04922ea4b59894f
[ "MIT" ]
null
null
null
scripts/data_preparation/create_lmdb.py
NimrodShabtay/KAIR
d744959d75c22b016543dc65c04922ea4b59894f
[ "MIT" ]
null
null
null
import argparse from os import path as osp from utils.utils_video import scandir from utils.utils_lmdb import make_lmdb_from_imgs def create_lmdb_for_div2k(): """Create lmdb files for DIV2K dataset. Usage: Before run this script, please run `extract_subimages.py`. Typically, there are four folders to be processed for DIV2K dataset. DIV2K_train_HR_sub DIV2K_train_LR_bicubic/X2_sub DIV2K_train_LR_bicubic/X3_sub DIV2K_train_LR_bicubic/X4_sub Remember to modify opt configurations according to your settings. """ # HR images folder_path = 'trainsets/DIV2K/DIV2K_train_HR_sub' lmdb_path = 'trainsets/DIV2K/DIV2K_train_HR_sub.lmdb' img_path_list, keys = prepare_keys_div2k(folder_path) make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys) # LRx2 images folder_path = 'trainsets/DIV2K/DIV2K_train_LR_bicubic/X2_sub' lmdb_path = 'trainsets/DIV2K/DIV2K_train_LR_bicubic_X2_sub.lmdb' img_path_list, keys = prepare_keys_div2k(folder_path) make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys) # LRx3 images folder_path = 'trainsets/DIV2K/DIV2K_train_LR_bicubic/X3_sub' lmdb_path = 'trainsets/DIV2K/DIV2K_train_LR_bicubic_X3_sub.lmdb' img_path_list, keys = prepare_keys_div2k(folder_path) make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys) # LRx4 images folder_path = 'trainsets/DIV2K/DIV2K_train_LR_bicubic/X4_sub' lmdb_path = 'trainsets/DIV2K/DIV2K_train_LR_bicubic_X4_sub.lmdb' img_path_list, keys = prepare_keys_div2k(folder_path) make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys) def prepare_keys_div2k(folder_path): """Prepare image path list and keys for DIV2K dataset. Args: folder_path (str): Folder path. Returns: list[str]: Image path list. list[str]: Key list. 
""" print('Reading image path list ...') img_path_list = sorted(list(scandir(folder_path, suffix='png', recursive=False))) keys = [img_path.split('.png')[0] for img_path in sorted(img_path_list)] return img_path_list, keys def create_lmdb_for_reds(): """Create lmdb files for REDS dataset. Usage: Before run this script, please run `regroup_reds_dataset.py`. We take three folders for example: train_sharp train_sharp_bicubic train_blur (for video deblurring) Remember to modify opt configurations according to your settings. """ # train_sharp folder_path = 'trainsets/REDS/train_sharp' lmdb_path = 'trainsets/REDS/train_sharp_with_val.lmdb' img_path_list, keys = prepare_keys_reds(folder_path) make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True) # train_sharp_bicubic folder_path = 'trainsets/REDS/train_sharp_bicubic' lmdb_path = 'trainsets/REDS/train_sharp_bicubic_with_val.lmdb' img_path_list, keys = prepare_keys_reds(folder_path) make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True) # train_blur (for video deblurring) folder_path = 'trainsets/REDS_blur/train_blur' lmdb_path = 'trainsets/REDS_blur/train_blur_with_val.lmdb' img_path_list, keys = prepare_keys_reds(folder_path) make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True) # train_blur_bicubic (for video deblurring-sr) folder_path = 'trainsets/REDS_blur_bicubic/train_blur_bicubic' lmdb_path = 'trainsets/REDS_blur_bicubic/train_blur_bicubic_with_val.lmdb' img_path_list, keys = prepare_keys_reds(folder_path) make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True) def prepare_keys_reds(folder_path): """Prepare image path list and keys for REDS dataset. Args: folder_path (str): Folder path. Returns: list[str]: Image path list. list[str]: Key list. 
""" print('Reading image path list ...') img_path_list = sorted(list(scandir(folder_path, suffix='png', recursive=True))) keys = [v.split('.png')[0] for v in img_path_list] # example: 000/00000000 return img_path_list, keys def create_lmdb_for_vimeo90k(): """Create lmdb files for Vimeo90K dataset. Usage: Remember to modify opt configurations according to your settings. """ # GT folder_path = 'trainsets/vimeo90k/vimeo_septuplet/sequences' lmdb_path = 'trainsets/vimeo90k/vimeo90k_train_GT_only4th.lmdb' train_list_path = 'trainsets/vimeo90k/vimeo_septuplet/sep_trainlist.txt' img_path_list, keys = prepare_keys_vimeo90k(folder_path, train_list_path, 'gt') make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True) # LQ folder_path = 'trainsets/vimeo90k/vimeo_septuplet_matlabLRx4/sequences' lmdb_path = 'trainsets/vimeo90k/vimeo90k_train_LR7frames.lmdb' train_list_path = 'trainsets/vimeo90k/vimeo_septuplet/sep_trainlist.txt' img_path_list, keys = prepare_keys_vimeo90k(folder_path, train_list_path, 'lq') make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True) def create_lmdb_for_vimeo90k_bd(): """Create lmdb files for Vimeo90K dataset (blur-downsampled lr only). Usage: Remember to modify opt configurations according to your settings. """ # LQ (blur-downsampled, BD) folder_path = 'trainsets/vimeo90k/vimeo_septuplet_BDLRx4/sequences' lmdb_path = 'trainsets/vimeo90k/vimeo90k_train_BDLR7frames.lmdb' train_list_path = 'trainsets/vimeo90k/vimeo_septuplet/sep_trainlist.txt' img_path_list, keys = prepare_keys_vimeo90k(folder_path, train_list_path, 'lq') make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True) def prepare_keys_vimeo90k(folder_path, train_list_path, mode): """Prepare image path list and keys for Vimeo90K dataset. Args: folder_path (str): Folder path. train_list_path (str): Path to the official train list. mode (str): One of 'gt' or 'lq'. 
Returns: list[str]: Image path list. list[str]: Key list. """ print('Reading image path list ...') with open(train_list_path, 'r') as fin: train_list = [line.strip() for line in fin] img_path_list = [] keys = [] for line in train_list: folder, sub_folder = line.split('/') img_path_list.extend([osp.join(folder, sub_folder, f'im{j + 1}.png') for j in range(7)]) keys.extend([f'{folder}/{sub_folder}/im{j + 1}' for j in range(7)]) if mode == 'gt': print('Only keep the 4th frame for the gt mode.') img_path_list = [v for v in img_path_list if v.endswith('im4.png')] keys = [v for v in keys if v.endswith('/im4')] return img_path_list, keys def create_lmdb_for_dvd(): """Create lmdb files for DVD dataset. Usage: We take two folders for example: GT input Remember to modify opt configurations according to your settings. """ # train_sharp folder_path = 'trainsets/DVD/train_GT' lmdb_path = 'trainsets/DVD/train_GT.lmdb' img_path_list, keys = prepare_keys_dvd(folder_path) make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True) # train_sharp_bicubic folder_path = 'trainsets/DVD/train_GT_blurred' lmdb_path = 'trainsets/DVD/train_GT_blurred.lmdb' img_path_list, keys = prepare_keys_dvd(folder_path) make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True) def prepare_keys_dvd(folder_path): """Prepare image path list and keys for DVD dataset. Args: folder_path (str): Folder path. Returns: list[str]: Image path list. list[str]: Key list. """ print('Reading image path list ...') img_path_list = sorted(list(scandir(folder_path, suffix='jpg', recursive=True))) keys = [v.split('.jpg')[0] for v in img_path_list] # example: 000/00000000 return img_path_list, keys def create_lmdb_for_gopro(): """Create lmdb files for GoPro dataset. Usage: We take two folders for example: GT input Remember to modify opt configurations according to your settings. 
""" # train_sharp folder_path = 'trainsets/GoPro/train_GT' lmdb_path = 'trainsets/GoPro/train_GT.lmdb' img_path_list, keys = prepare_keys_gopro(folder_path) make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True) # train_sharp_bicubic folder_path = 'trainsets/GoPro/train_GT_blurred' lmdb_path = 'trainsets/GoPro/train_GT_blurred.lmdb' img_path_list, keys = prepare_keys_gopro(folder_path) make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True) def prepare_keys_gopro(folder_path): """Prepare image path list and keys for GoPro dataset. Args: folder_path (str): Folder path. Returns: list[str]: Image path list. list[str]: Key list. """ print('Reading image path list ...') img_path_list = sorted(list(scandir(folder_path, suffix='png', recursive=True))) keys = [v.split('.png')[0] for v in img_path_list] # example: 000/00000000 return img_path_list, keys def create_lmdb_for_davis(): """Create lmdb files for DAVIS dataset. Usage: We take one folders for example: GT Remember to modify opt configurations according to your settings. """ # train_sharp folder_path = 'trainsets/DAVIS/train_GT' lmdb_path = 'trainsets/DAVIS/train_GT.lmdb' img_path_list, keys = prepare_keys_davis(folder_path) make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True) def prepare_keys_davis(folder_path): """Prepare image path list and keys for DAVIS dataset. Args: folder_path (str): Folder path. Returns: list[str]: Image path list. list[str]: Key list. """ print('Reading image path list ...') img_path_list = sorted(list(scandir(folder_path, suffix='jpg', recursive=True))) keys = [v.split('.jpg')[0] for v in img_path_list] # example: 000/00000000 return img_path_list, keys def create_lmdb_for_ldv(): """Create lmdb files for LDV dataset. Usage: We take two folders for example: GT input Remember to modify opt configurations according to your settings. 
""" # training_raw folder_path = 'trainsets/LDV/training_raw' lmdb_path = 'trainsets/LDV/training_raw.lmdb' img_path_list, keys = prepare_keys_ldv(folder_path) make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True) # training_fixed-QP folder_path = 'trainsets/LDV/training_fixed-QP' lmdb_path = 'trainsets/LDV/training_fixed-QP.lmdb' img_path_list, keys = prepare_keys_ldv(folder_path) make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True) # training_fixed-rate folder_path = 'trainsets/LDV/training_fixed-rate' lmdb_path = 'trainsets/LDV/training_fixed-rate.lmdb' img_path_list, keys = prepare_keys_ldv(folder_path) make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True) def prepare_keys_ldv(folder_path): """Prepare image path list and keys for LDV dataset. Args: folder_path (str): Folder path. Returns: list[str]: Image path list. list[str]: Key list. """ print('Reading image path list ...') img_path_list = sorted(list(scandir(folder_path, suffix='png', recursive=True))) keys = [v.split('.png')[0] for v in img_path_list] # example: 000/00000000 return img_path_list, keys def create_lmdb_for_reds_orig(): """Create lmdb files for REDS_orig dataset (120 fps). Usage: Before run this script, please run `regroup_reds_dataset.py`. We take one folders for example: train_orig Remember to modify opt configurations according to your settings. """ # train_sharp folder_path = 'trainsets/REDS_orig/train_orig' lmdb_path = 'trainsets/REDS_orig/train_orig_with_val.lmdb' img_path_list, keys = prepare_keys_reds_orig(folder_path) make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys, multiprocessing_read=True) def prepare_keys_reds_orig(folder_path): """Prepare image path list and keys for REDS_orig dataset (120 fps). Args: folder_path (str): Folder path. Returns: list[str]: Image path list. list[str]: Key list. 
""" print('Reading image path list ...') img_path_list = sorted(list(scandir(folder_path, suffix='png', recursive=True))) keys = [v.split('.png')[0] for v in img_path_list] # example: 000/00000000 return img_path_list, keys if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--dataset', type=str, help=("Options: 'DIV2K', 'REDS', 'Vimeo90K', 'Vimeo90K_BD', 'DVD', 'GoPro'," "'DAVIS', 'LDV', 'REDS_orig' " 'You may need to modify the corresponding configurations in codes.')) args = parser.parse_args() dataset = args.dataset.lower() if dataset == 'div2k': create_lmdb_for_div2k() elif dataset == 'reds': create_lmdb_for_reds() elif dataset == 'vimeo90k': create_lmdb_for_vimeo90k() elif dataset == 'vimeo90k_bd': create_lmdb_for_vimeo90k_bd() elif dataset == 'dvd': create_lmdb_for_dvd() elif dataset == 'gopro': create_lmdb_for_gopro() elif dataset == 'davis': create_lmdb_for_davis() elif dataset == 'ldv': create_lmdb_for_ldv() elif dataset == 'reds_orig': create_lmdb_for_reds_orig() else: raise ValueError('Wrong dataset.')
35.134663
96
0.703173
1,980
14,089
4.681313
0.088384
0.098177
0.078326
0.079297
0.835581
0.806775
0.727263
0.680764
0.656381
0.615385
0
0.016813
0.202144
14,089
400
97
35.2225
0.807757
0.267585
0
0.386364
0
0
0.236328
0.176122
0
0
0
0
0
1
0.096591
false
0
0.022727
0
0.164773
0.051136
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
8742f24a7416d0d1d474f49a46d6889d12b02a66
29
py
Python
pypf/tests/__init__.py
ROSOLCH/pypf
e90333b929e5e037e1dabba349e5069f1c141768
[ "MIT" ]
18
2018-02-15T06:54:10.000Z
2022-03-25T10:15:52.000Z
pypf/tests/__init__.py
ROSOLCH/pypf
e90333b929e5e037e1dabba349e5069f1c141768
[ "MIT" ]
24
2017-08-13T18:01:13.000Z
2022-03-31T02:11:03.000Z
pypf/tests/__init__.py
ROSOLCH/pypf
e90333b929e5e037e1dabba349e5069f1c141768
[ "MIT" ]
13
2018-04-18T00:27:01.000Z
2021-08-18T21:18:39.000Z
"""Module initialization."""
14.5
28
0.689655
2
29
10
1
0
0
0
0
0
0
0
0
0
0
0
0.068966
29
1
29
29
0.740741
0.758621
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
874bec817001894bf56db865b0d3d1d5fa769517
341
py
Python
tests/test_imports.py
samirelanduk/quickplots
59f5e6ff367b2c1c24ba7cf1805d03552034c6d8
[ "MIT" ]
1
2016-02-28T00:17:29.000Z
2016-02-28T00:17:29.000Z
tests/test_imports.py
samirelanduk/quickplots
59f5e6ff367b2c1c24ba7cf1805d03552034c6d8
[ "MIT" ]
null
null
null
tests/test_imports.py
samirelanduk/quickplots
59f5e6ff367b2c1c24ba7cf1805d03552034c6d8
[ "MIT" ]
null
null
null
from unittest import TestCase import quickplots class QuickObjectsTests(TestCase): def test_line_imported(self): from quickplots.quick import line self.assertIs(line, quickplots.line) def test_scatter_imported(self): from quickplots.quick import scatter self.assertIs(scatter, quickplots.scatter)
24.357143
50
0.739003
39
341
6.358974
0.384615
0.056452
0.129032
0.209677
0.298387
0.298387
0
0
0
0
0
0
0.199413
341
13
51
26.230769
0.908425
0
0
0
0
0
0
0
0
0
0
0
0.222222
1
0.222222
false
0
0.666667
0
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
4
874f99748faad9dc7a6989c3ea5b79645d5b42ba
717
py
Python
Python-Fundamentals-June-2019/02_functions_and_debugging/01_blank_receipt.py
marinakolova/Python-Courses
eb95c782307be561b5026c5adafaa001b04caf4f
[ "MIT" ]
null
null
null
Python-Fundamentals-June-2019/02_functions_and_debugging/01_blank_receipt.py
marinakolova/Python-Courses
eb95c782307be561b5026c5adafaa001b04caf4f
[ "MIT" ]
null
null
null
Python-Fundamentals-June-2019/02_functions_and_debugging/01_blank_receipt.py
marinakolova/Python-Courses
eb95c782307be561b5026c5adafaa001b04caf4f
[ "MIT" ]
null
null
null
def print_receipt_header(): """ Prints the receipt header :return: None """ print('CASH RECEIPT') print('------------------------------') def print_receipt_body(): """ Prints the receipt body :return: None """ print('Charged to____________________') print('Received by___________________') def print_receipt_footer(): """ Prints the receipt footer :return: None """ print('------------------------------') print('\u00A9 SoftUni') def print_receipt(): """ Prints a blank receipt :return: None """ print_receipt_header() print_receipt_body() print_receipt_footer() if __name__ == '__main__': print_receipt()
18.384615
43
0.566248
67
717
5.149254
0.328358
0.278261
0.173913
0
0
0
0
0
0
0
0
0.005474
0.235704
717
39
44
18.384615
0.624088
0.214784
0
0.133333
0
0
0.316222
0.211499
0
0
0
0
0
1
0.266667
true
0
0
0
0.266667
0.933333
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
1
0
4
5e641d34408171e4764f9396d0a36d0b70239ba9
7,654
py
Python
models/linear.py
notreallyme2/teas
26e0482bccb73cd7236e5a69b5caf59660ecee7a
[ "MIT" ]
1
2021-01-07T03:37:44.000Z
2021-01-07T03:37:44.000Z
models/linear.py
notreallyme2/teas
26e0482bccb73cd7236e5a69b5caf59660ecee7a
[ "MIT" ]
null
null
null
models/linear.py
notreallyme2/teas
26e0482bccb73cd7236e5a69b5caf59660ecee7a
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # coding: utf-8 """This module contains classes (models) and methods for exploring linear autoencoders """ import numpy as np import pandas as pd from torch import nn, optim, tensor, FloatTensor from torch.utils.data import Dataset, DataLoader, random_split import torch.nn.functional as F class LinearMLP(nn.Module): """A pytorch module to build a simple linear multilayer perceptron""" def __init__(self, arch = [100, 100]): """ Parameters ---------- arch : list[int] The architecture of the MLP. Each element in the list is the number of nodes in a layer. E.g. arch = [100, 50, 100] creates an MLP with 100 inputs and outpus and a 50 node hidden layer """ super().__init__() self.arch = arch layers = [] for i in range(len(self.arch) - 1): layers.append(nn.Linear(self.arch[i], self.arch[i+1])) self.net = nn.Sequential(*layers) def forward(self, X): return (self.net(X)) def update_batch(self, X, Y, optimizer, criterion, train = True): """update_batch takes data, an optimizer, a loss function and a boolean indicating whether this update should be treated as a training run (i.e. the model's weights should be updated) or not. 
Parameters ---------- model : torch.nn.mnodule The model to be updated X : torch.FloatTensor The input data (i.e feature matrix) Y : torch.FloatTensor The target matrix) optimizer : torch.optim The optimizer to be used criterion : torch.nn.Module The loss function train : bool Should the weights be updated (default = True) """ Y_hat = self.forward(X) loss = criterion(Y_hat, Y) if train: loss.backward() optimizer.step() optimizer.zero_grad() return loss.item() class LinearAE(nn.Module): """A pytorch module to build a simple linear autoencoder""" def __init__(self, input_dim = 100, hidden_dim = 512): """ Parameters ---------- input_dim : int The number of input (and output) features hidden_dim : int The number of features in the hidden layer """ super().__init__() self.input = nn.Linear(input_dim, hidden_dim) self.output = nn.Linear(hidden_dim, input_dim) def forward(self, X): X = self.input(X) return (self.output(X)) def Z_from_X(self, X): return self.input(X) def X_from_Z(self, Z): return self.output(Z) def update_batch(self, X, optimizer, criterion, train = True): """update_batch takes data, an optimizer, a loss function and a boolean indicating whether this update should be treated as a training run (i.e. the model's weights should be updated) or not. 
Parameters ---------- model : torch.nn.mnodule The model to be updated X : torch.FloatTensor The input data (i.e feature matrix) optimizer : torch.optim The optimizer to be used criterion : torch.nn.Module The loss function train : bool Should the weights be updated (default = True) """ X_tilde = self.forward(X) loss = criterion(X_tilde, X) if train: loss.backward() optimizer.step() optimizer.zero_grad() return loss.item() class LinearFEA(nn.Module): """A pytorch module to build a linear forward-embedding autoencoder""" def __init__(self, input_dim = 100, hidden_dim = 512, output_dim = 1000): """ Parameters ---------- input_dim : int The number of input features hidden_dim : int The number of features in the hidden layer output_dim : int The number of output features """ super().__init__() self.input = nn.Linear(input_dim, hidden_dim) self.predict_Y = nn.Linear(hidden_dim, output_dim) self.reconstruct_X = nn.Linear(hidden_dim, input_dim) def forward(self, X): Z = self.input(X) Y_hat = self.predict_Y(Z) X_tilde = self.reconstruct_X(Z) return Y_hat, X_tilde def update_batch(self, X, Y, optimizer, criterion, train = True): """update_batch takes data, an optimizer, a loss function and a boolean indicating whether this update should be treated as a training run (i.e. the model's weights should be updated) or not. 
Parameters ---------- model : torch.nn.mnodule The model to be updated X : torch.FloatTensor The input data (i.e feature matrix) Y : torch.FloatTensor The target matrix) optimizer : torch.optim The optimizer to be used criterion : torch.nn.Module The loss function train : bool Should the weights be updated (default = True) """ Y_hat, X_tilde = self.forward(X) loss = criterion(X, X_tilde, Y, Y_hat) if train: loss.backward() optimizer.step() optimizer.zero_grad() return loss.item() class LinearTEA(nn.Module): """A pytorch module to build a linear target-embedding autoencoder""" def __init__(self, input_dim=100, hidden_dim = 256, output_dim=1000): """ Parameters ---------- input_dim : int The number of input features hidden_dim : int The number of features in the hidden layer output_dim : int The number of output features """ super().__init__() self.input_X = nn.Linear(input_dim, hidden_dim) self.input_Y = nn.Linear(output_dim, hidden_dim) self.predict_Y = nn.Linear(hidden_dim, output_dim) def forward(self, X, Y): """ Forward pass through the model Parameters ---------- X : tensor Y : tensor """ Z_from_X = self.input_X(X) Z_from_Y = self.input_Y(Y) Y_hat = self.predict_Y(Z_from_Y) return Y_hat, Z_from_Y, Z_from_X def predict_Y_from_X(self, X): """Make a prediction of Y from X. For inference use""" Z_from_X = self.input_X(X) Y_hat = self.predict_Y(Z_from_X) return Y_hat def update_batch(self, X, Y, optimizer, criterion, train = True): """update_batch takes data, an optimizer, a loss function and a boolean indicating whether this update should be treated as a training run (i.e. the model's weights should be updated) or not. 
Parameters ---------- model : torch.nn.mnodule The model to be updated X : torch.FloatTensor The input data (i.e feature matrix) Y : torch.FloatTensor The target matrix) optimizer : torch.optim The optimizer to be used criterion : torch.nn.Module The loss function train : bool Should the weights be updated (default = True) """ Y_hat, Z, Z_hat = self.forward(X, Y) loss = criterion(Y, Y_hat, Z, Z_hat) if train: loss.backward() optimizer.step() optimizer.zero_grad() return loss.item()
33.718062
196
0.572642
982
7,654
4.330957
0.150713
0.029626
0.023278
0.028215
0.760875
0.738773
0.738773
0.709852
0.686104
0.669175
0
0.009649
0.336556
7,654
227
197
33.718062
0.827885
0.438986
0
0.423529
0
0
0
0
0
0
0
0
0
1
0.176471
false
0
0.058824
0.035294
0.411765
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
5e83d48f36939ff835026d2f940bebef354dd80a
130
py
Python
tests/helpers.py
lewoudar/scalpel
455a3ff766c91d02f33957ea17f1cfbec141ab60
[ "Apache-2.0" ]
15
2020-11-02T21:11:03.000Z
2022-03-10T14:17:46.000Z
tests/helpers.py
lewoudar/scalpel
455a3ff766c91d02f33957ea17f1cfbec141ab60
[ "Apache-2.0" ]
4
2020-11-01T17:54:15.000Z
2022-03-04T21:42:41.000Z
tests/helpers.py
lewoudar/scalpel
455a3ff766c91d02f33957ea17f1cfbec141ab60
[ "Apache-2.0" ]
2
2021-05-01T06:59:12.000Z
2021-11-25T07:01:02.000Z
def assert_dicts(dict1, dict2): assert len(dict1) == len(dict2) for key in dict1: assert dict1[key] == dict2[key]
26
39
0.630769
19
130
4.263158
0.473684
0
0
0
0
0
0
0
0
0
0
0.070707
0.238462
130
4
40
32.5
0.747475
0
0
0
0
0
0
0
0
0
0
0
0.75
1
0.25
false
0
0
0
0.25
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
0
0
0
0
0
0
0
4
5e96df112a9e557a61742a85397c09a1c67cde44
108
py
Python
.vscode/desafios/desafio 003.py
FonsecaThay/Curso-de-python
58095dcb1f59d9e61aeab5a9de332e463f330d12
[ "MIT" ]
null
null
null
.vscode/desafios/desafio 003.py
FonsecaThay/Curso-de-python
58095dcb1f59d9e61aeab5a9de332e463f330d12
[ "MIT" ]
null
null
null
.vscode/desafios/desafio 003.py
FonsecaThay/Curso-de-python
58095dcb1f59d9e61aeab5a9de332e463f330d12
[ "MIT" ]
null
null
null
a = int(input('Digite um número:')) b = int(input('Digite outro número:')) s = a+b print('A soma vale', s)
18
38
0.62037
20
108
3.35
0.6
0.238806
0.41791
0
0
0
0
0
0
0
0
0
0.166667
108
5
39
21.6
0.744444
0
0
0
0
0
0.444444
0
0
0
0
0
0
1
0
false
0
0
0
0
0.25
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
5eb3264d58e7f75f372a39aeb41d15a1d05ac379
1,100
py
Python
abcdeep/queue.py
Conchylicultor/AbcDeep
6fcfc03a1a516ccd760201bb004098e6f6fe0e7e
[ "Apache-2.0" ]
1
2017-09-10T14:13:39.000Z
2017-09-10T14:13:39.000Z
abcdeep/queue.py
Conchylicultor/AbcDeep
6fcfc03a1a516ccd760201bb004098e6f6fe0e7e
[ "Apache-2.0" ]
null
null
null
abcdeep/queue.py
Conchylicultor/AbcDeep
6fcfc03a1a516ccd760201bb004098e6f6fe0e7e
[ "Apache-2.0" ]
null
null
null
# Copyright 2017 Conchylicultor. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ """ import tensorflow as tf def create_queue(x, y, batch_size): """ Temporary function which create a queue from the given data Args: x: Input y: Target Return: (t_x, t_y): a tuple of tensor corresponding to the queue output """ # TODO: Replace this function by more advanced/flexible queue return tf.train.batch([x, y], batch_size) class InputQueues: pass
30.555556
80
0.662727
152
1,100
4.763158
0.651316
0.082873
0.035912
0.044199
0
0
0
0
0
0
0
0.00905
0.196364
1,100
35
81
31.428571
0.809955
0.801818
0
0
0
0
0
0
0
0
0
0.028571
0
1
0.2
false
0.2
0.2
0
0.8
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
1
0
0
0
0
1
0
0
1
0
0
4
0d7dbf7f45021bc9cf900caac9ab23c59377d6f4
1,296
py
Python
odin/metrics/compute_sharpe_ratio.py
gsamarakoon/Odin
e2e9d638c68947d24f1260d35a3527dd84c2523f
[ "MIT" ]
103
2017-01-14T19:38:14.000Z
2022-03-10T12:52:09.000Z
odin/metrics/compute_sharpe_ratio.py
gsamarakoon/Odin
e2e9d638c68947d24f1260d35a3527dd84c2523f
[ "MIT" ]
6
2017-01-19T01:38:53.000Z
2020-03-09T19:03:18.000Z
odin/metrics/compute_sharpe_ratio.py
JamesBrofos/Odin
e2e9d638c68947d24f1260d35a3527dd84c2523f
[ "MIT" ]
33
2017-02-05T21:51:17.000Z
2021-12-22T20:38:30.000Z
"""Sharpe Ratio Module Create the Sharpe ratio for the strategy, based on a benchmark of zero (i.e. no risk-free rate information); this is coincidentally the proper risk-free rate to utilize for dollar-neutral strategies. The Sharpe ratio generally measures the returns of a strategy relative to its historical risk; the higher this ratio, the more heavily leveraged the strategy may be. The Sharpe ratio is assumed to be annualized to a yearly period since there are 252 trading days in a year. For higher or lower frequency strategies, this annualization constant may be augmented. """ import numpy as np def compute_sharpe_ratio(returns, periods=252.0): """Computes the Sharpe ratio based on a time-series of returns. Parameters ---------- returns: Pandas data frame. A Pandas data frame where the index is a time-series of dates corresponding to a historical period of performance of a trading algorithm. The values are the day-over-day percentage changes in equity. periods (Optional): Float. The annualization constant for computing the Sharpe ratio. By default, this corresponds to daily trades (because there are 252 trading sessions per year). """ return np.sqrt(periods) * returns.mean() / returns.std()
41.806452
80
0.740741
194
1,296
4.938144
0.520619
0.080376
0.073069
0.037578
0
0
0
0
0
0
0
0.009681
0.202932
1,296
30
81
43.2
0.917715
0.849537
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
4
0d84a31b2e2e97a4c0ad01198c8ceb65004a3cda
157
py
Python
main.py
Samathingamajig/center-of-mass-py
d5bced8293d93fbfbc6a05fdae8fe017061cd396
[ "MIT" ]
null
null
null
main.py
Samathingamajig/center-of-mass-py
d5bced8293d93fbfbc6a05fdae8fe017061cd396
[ "MIT" ]
null
null
null
main.py
Samathingamajig/center-of-mass-py
d5bced8293d93fbfbc6a05fdae8fe017061cd396
[ "MIT" ]
null
null
null
from question_wrapper import run_all_questions if __name__ == "__main__": questions = __import__("questions") run_all_questions(require_input=True)
26.166667
46
0.783439
19
157
5.526316
0.684211
0.114286
0.285714
0
0
0
0
0
0
0
0
0
0.133758
157
5
47
31.4
0.772059
0
0
0
0
0
0.10828
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
0db5d3fc1c72e2b45ec3b2f3936ebad8e83cca5f
18
py
Python
python/testData/keywordCompletion/caseInsideMatchStatement.after.py
06needhamt/intellij-community
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
[ "Apache-2.0" ]
null
null
null
python/testData/keywordCompletion/caseInsideMatchStatement.after.py
06needhamt/intellij-community
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
[ "Apache-2.0" ]
null
null
null
python/testData/keywordCompletion/caseInsideMatchStatement.after.py
06needhamt/intellij-community
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
[ "Apache-2.0" ]
null
null
null
match 42: case
9
9
0.611111
3
18
3.666667
1
0
0
0
0
0
0
0
0
0
0
0.166667
0.333333
18
2
10
9
0.75
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
0dcb0a442ab65be2a92cff733aba7b9f5b7bea33
138
py
Python
sumUptoK.py
pokovenkat/python-programs
eba88c7f0815b6c202a3ad399151b6699ebb91fe
[ "Apache-2.0" ]
null
null
null
sumUptoK.py
pokovenkat/python-programs
eba88c7f0815b6c202a3ad399151b6699ebb91fe
[ "Apache-2.0" ]
null
null
null
sumUptoK.py
pokovenkat/python-programs
eba88c7f0815b6c202a3ad399151b6699ebb91fe
[ "Apache-2.0" ]
null
null
null
n=int(input()) k=int(input()) arr=[] tot=0 for i in range(n): arr.append(int(input())) for i in range (k): tot+=arr[i] print(tot)
13.8
28
0.586957
28
138
2.892857
0.464286
0.296296
0.148148
0.271605
0
0
0
0
0
0
0
0.008772
0.173913
138
9
29
15.333333
0.701754
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.111111
1
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
2180f61b1e02a2cf0644e6babda7cc813a551d46
15
py
Python
civil/apps/definitions/__init__.py
christopinka/django-civil
d134624da9d36c4ba0bea2df8a21698df196bdf6
[ "Apache-2.0" ]
3
2020-06-15T21:01:06.000Z
2022-02-17T17:41:57.000Z
civil/apps/frontend/__init__.py
christopinka/django-civil
d134624da9d36c4ba0bea2df8a21698df196bdf6
[ "Apache-2.0" ]
null
null
null
civil/apps/frontend/__init__.py
christopinka/django-civil
d134624da9d36c4ba0bea2df8a21698df196bdf6
[ "Apache-2.0" ]
1
2021-11-06T18:33:29.000Z
2021-11-06T18:33:29.000Z
__deps__ = []
5
13
0.533333
1
15
4
1
0
0
0
0
0
0
0
0
0
0
0
0.266667
15
2
14
7.5
0.363636
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
21a2eb4f95f1b1cf6c59d29dd5f0b98ea4119af3
161
py
Python
pyrobolearn/utils/data_structures/__init__.py
Pandinosaurus/pyrobolearn
9cd7c060723fda7d2779fa255ac998c2c82b8436
[ "Apache-2.0" ]
2
2021-01-21T21:08:30.000Z
2022-03-29T16:45:49.000Z
pyrobolearn/utils/data_structures/__init__.py
Pandinosaurus/pyrobolearn
9cd7c060723fda7d2779fa255ac998c2c82b8436
[ "Apache-2.0" ]
null
null
null
pyrobolearn/utils/data_structures/__init__.py
Pandinosaurus/pyrobolearn
9cd7c060723fda7d2779fa255ac998c2c82b8436
[ "Apache-2.0" ]
1
2020-09-29T21:25:39.000Z
2020-09-29T21:25:39.000Z
# -*- coding: utf-8 -*- # Define common data structures # Ordered sets from .orderedset import * # Queues from .queues import * # Graph from .graph import *
12.384615
31
0.677019
20
161
5.45
0.7
0
0
0
0
0
0
0
0
0
0
0.007813
0.204969
161
12
32
13.416667
0.84375
0.478261
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
21bfb3fc0011d4eaf3a384e1e6de292f20c3f420
435
py
Python
bauer_bsm/bsm/format.py
alkradhazar/test1
addb391d9e8bac8a7d8ed8a5310fb695ce15eaf6
[ "Apache-2.0" ]
7
2020-07-07T07:43:41.000Z
2022-01-21T22:31:33.000Z
bauer_bsm/bsm/format.py
alkradhazar/test1
addb391d9e8bac8a7d8ed8a5310fb695ce15eaf6
[ "Apache-2.0" ]
2
2021-02-28T22:06:54.000Z
2021-09-29T09:47:45.000Z
bauer_bsm/bsm/format.py
alkradhazar/test1
addb391d9e8bac8a7d8ed8a5310fb695ce15eaf6
[ "Apache-2.0" ]
2
2021-09-24T03:49:19.000Z
2022-03-02T12:53:00.000Z
# BSM Python library and command line tool # # Copyright (C) 2020 chargeIT mobility GmbH # # SPDX-License-Identifier: Apache-2.0 def format_point(point): return '{}: {}'.format(point.point_type.id, format_point_value(point)) def format_point_value(point): unit = '' if point.value is not None and point.point_type.units: unit = ' {}'.format(point.point_type.units) return '{}{}'.format(point.value, unit)
22.894737
74
0.687356
61
435
4.770492
0.52459
0.226804
0.164948
0.137457
0
0
0
0
0
0
0
0.016667
0.172414
435
18
75
24.166667
0.791667
0.271264
0
0
0
0
0.041801
0
0
0
0
0
0
1
0.285714
false
0
0
0.142857
0.571429
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
21c58d377aee1421c8478b364e9aea3ab83d549a
585
py
Python
rstbx/viewer/controls.py
rimmartin/cctbx_project
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
[ "BSD-3-Clause-LBNL" ]
null
null
null
rstbx/viewer/controls.py
rimmartin/cctbx_project
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
[ "BSD-3-Clause-LBNL" ]
null
null
null
rstbx/viewer/controls.py
rimmartin/cctbx_project
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
[ "BSD-3-Clause-LBNL" ]
null
null
null
from __future__ import division from rstbx.viewer import results_base import wx class ListBase(wx.ListCtrl): def Reset(self): self.dataSource = results_base.EmptyData() self.RefreshAllItems() def RefreshAllItems(self): n_items = self.dataSource.GetItemCount() self.SetItemCount(n_items) if (n_items > 0): self.RefreshItems(0, n_items - 1) def OnGetItemImage(self, item): return self.dataSource.GetItemImage(item) def OnGetItemAttr(self, item): pass def OnGetItemText(self, item, col): return self.dataSource.GetItemText(item, col)
23.4
49
0.726496
73
585
5.684932
0.479452
0.13494
0.096386
0
0
0
0
0
0
0
0
0.006211
0.174359
585
24
50
24.375
0.853002
0
0
0
0
0
0
0
0
0
0
0
0
1
0.277778
false
0.055556
0.166667
0.111111
0.611111
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
4
21ee818f063682f8e9151c26f5697c3b3793529f
1,195
py
Python
examples/get_state_flight_controller.py
msanchezc/dji-asdk-to-python
cf3e56691524624314a28f5ebc6f3f59cbd4d8cb
[ "BSD-3-Clause" ]
null
null
null
examples/get_state_flight_controller.py
msanchezc/dji-asdk-to-python
cf3e56691524624314a28f5ebc6f3f59cbd4d8cb
[ "BSD-3-Clause" ]
null
null
null
examples/get_state_flight_controller.py
msanchezc/dji-asdk-to-python
cf3e56691524624314a28f5ebc6f3f59cbd4d8cb
[ "BSD-3-Clause" ]
2
2021-01-05T13:25:25.000Z
2022-01-29T06:02:35.000Z
from dji_asdk_to_python.products.aircraft import Aircraft from dji_asdk_to_python.flight_controller.flight_controller_state import ( FlightControllerState, ) APP_IP = "192.168.0.110" drone = Aircraft(APP_IP) fc = drone.getFlightController() flight_controller_state = fc.getState() print("areMotorsOn %s " % flight_controller_state.areMotorsOn()) print("isFlying %s " % flight_controller_state.isFlying()) print("velocityX %s " % flight_controller_state.getVelocityX()) print("velocityY %s " % flight_controller_state.getVelocityY()) print("velocityZ %s " % flight_controller_state.getVelocityZ()) aircraft_location = flight_controller_state.getAircraftLocation() print("getAltitude %s " % aircraft_location.getAltitude()) print("getLatitude %s " % aircraft_location.getLatitude()) print("getLongitude %s " % aircraft_location.getLongitude()) aircraft_attitude = flight_controller_state.getAttitude() print("pitch %s " % aircraft_attitude.pitch) print("roll %s " % aircraft_attitude.roll) print("yaw %s " % aircraft_attitude.yaw) print("GoHomeExecutionState %s" % flight_controller_state.getGoHomeExecutionState()) print("getFlightMode %s" % flight_controller_state.getFlightMode())
36.212121
84
0.793305
136
1,195
6.683824
0.330882
0.211221
0.254125
0.169417
0.041804
0
0
0
0
0
0
0.009166
0.087029
1,195
32
85
37.34375
0.824015
0
0
0
0
0
0.157322
0
0
0
0
0
0
1
0
false
0
0.086957
0
0.086957
0.565217
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
4
21f1cd0003392f05f3d28dc7abec8eaa0ec3733d
61,768
py
Python
tests/rpc/test_rpc_telegram.py
Fractate/freqbot
47b35d2320dc97977411454c1466c762d339fdee
[ "MIT" ]
1
2022-03-06T22:44:30.000Z
2022-03-06T22:44:30.000Z
tests/rpc/test_rpc_telegram.py
Fractate/freqbot
47b35d2320dc97977411454c1466c762d339fdee
[ "MIT" ]
null
null
null
tests/rpc/test_rpc_telegram.py
Fractate/freqbot
47b35d2320dc97977411454c1466c762d339fdee
[ "MIT" ]
1
2021-09-22T23:28:21.000Z
2021-09-22T23:28:21.000Z
# pragma pylint: disable=missing-docstring, C0103 # pragma pylint: disable=protected-access, unused-argument, invalid-name # pragma pylint: disable=too-many-lines, too-many-arguments import logging import re from datetime import datetime from functools import reduce from random import choice, randint from string import ascii_uppercase from unittest.mock import ANY, MagicMock import arrow import pytest from telegram import Chat, Message, ReplyKeyboardMarkup, Update from telegram.error import BadRequest, NetworkError, TelegramError from freqtrade import __version__ from freqtrade.constants import CANCEL_REASON from freqtrade.edge import PairInfo from freqtrade.enums import RPCMessageType, RunMode, SellType, State from freqtrade.exceptions import OperationalException from freqtrade.freqtradebot import FreqtradeBot from freqtrade.loggers import setup_logging from freqtrade.persistence import PairLocks, Trade from freqtrade.rpc import RPC from freqtrade.rpc.telegram import Telegram, authorized_only from tests.conftest import (create_mock_trades, get_patched_freqtradebot, log_has, log_has_re, patch_exchange, patch_get_signal, patch_whitelist) class DummyCls(Telegram): """ Dummy class for testing the Telegram @authorized_only decorator """ def __init__(self, rpc: RPC, config) -> None: super().__init__(rpc, config) self.state = {'called': False} def _init(self): pass @authorized_only def dummy_handler(self, *args, **kwargs) -> None: """ Fake method that only change the state of the object """ self.state['called'] = True @authorized_only def dummy_exception(self, *args, **kwargs) -> None: """ Fake method that throw an exception """ raise Exception('test') def get_telegram_testobject(mocker, default_conf, mock=True, ftbot=None): msg_mock = MagicMock() if mock: mocker.patch.multiple( 'freqtrade.rpc.telegram.Telegram', _init=MagicMock(), _send_msg=msg_mock ) if not ftbot: ftbot = get_patched_freqtradebot(mocker, default_conf) rpc = RPC(ftbot) telegram = Telegram(rpc, 
default_conf) return telegram, ftbot, msg_mock def test_telegram__init__(default_conf, mocker) -> None: mocker.patch('freqtrade.rpc.telegram.Updater', MagicMock()) mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock()) telegram, _, _ = get_telegram_testobject(mocker, default_conf) assert telegram._config == default_conf def test_telegram_init(default_conf, mocker, caplog) -> None: start_polling = MagicMock() mocker.patch('freqtrade.rpc.telegram.Updater', MagicMock(return_value=start_polling)) get_telegram_testobject(mocker, default_conf, mock=False) assert start_polling.call_count == 0 # number of handles registered assert start_polling.dispatcher.add_handler.call_count > 0 assert start_polling.start_polling.call_count == 1 message_str = ("rpc.telegram is listening for following commands: [['status'], ['profit'], " "['balance'], ['start'], ['stop'], ['forcesell'], ['forcebuy'], ['trades'], " "['delete'], ['performance'], ['stats'], ['daily'], ['count'], ['locks'], " "['unlock', 'delete_locks'], ['reload_config', 'reload_conf'], " "['show_config', 'show_conf'], ['stopbuy'], " "['whitelist'], ['blacklist'], ['logs'], ['edge'], ['help'], ['version']" "]") assert log_has(message_str, caplog) def test_cleanup(default_conf, mocker, ) -> None: updater_mock = MagicMock() updater_mock.stop = MagicMock() mocker.patch('freqtrade.rpc.telegram.Updater', updater_mock) telegram, _, _ = get_telegram_testobject(mocker, default_conf, mock=False) telegram.cleanup() assert telegram._updater.stop.call_count == 1 def test_authorized_only(default_conf, mocker, caplog, update) -> None: patch_exchange(mocker) caplog.set_level(logging.DEBUG) default_conf['telegram']['enabled'] = False bot = FreqtradeBot(default_conf) rpc = RPC(bot) dummy = DummyCls(rpc, default_conf) patch_get_signal(bot) dummy.dummy_handler(update=update, context=MagicMock()) assert dummy.state['called'] is True assert log_has('Executing handler: dummy_handler for chat_id: 0', caplog) assert not 
log_has('Rejected unauthorized message from: 0', caplog) assert not log_has('Exception occurred within Telegram module', caplog) def test_authorized_only_unauthorized(default_conf, mocker, caplog) -> None: patch_exchange(mocker) caplog.set_level(logging.DEBUG) chat = Chat(0xdeadbeef, 0) update = Update(randint(1, 100)) update.message = Message(randint(1, 100), datetime.utcnow(), chat) default_conf['telegram']['enabled'] = False bot = FreqtradeBot(default_conf) rpc = RPC(bot) dummy = DummyCls(rpc, default_conf) patch_get_signal(bot) dummy.dummy_handler(update=update, context=MagicMock()) assert dummy.state['called'] is False assert not log_has('Executing handler: dummy_handler for chat_id: 3735928559', caplog) assert log_has('Rejected unauthorized message from: 3735928559', caplog) assert not log_has('Exception occurred within Telegram module', caplog) def test_authorized_only_exception(default_conf, mocker, caplog, update) -> None: patch_exchange(mocker) default_conf['telegram']['enabled'] = False bot = FreqtradeBot(default_conf) rpc = RPC(bot) dummy = DummyCls(rpc, default_conf) patch_get_signal(bot) dummy.dummy_exception(update=update, context=MagicMock()) assert dummy.state['called'] is False assert not log_has('Executing handler: dummy_handler for chat_id: 0', caplog) assert not log_has('Rejected unauthorized message from: 0', caplog) assert log_has('Exception occurred within Telegram module', caplog) def test_telegram_status(default_conf, update, mocker) -> None: update.message.chat.id = "123" default_conf['telegram']['enabled'] = False default_conf['telegram']['chat_id'] = "123" status_table = MagicMock() mocker.patch('freqtrade.rpc.telegram.Telegram._status_table', status_table) mocker.patch.multiple( 'freqtrade.rpc.rpc.RPC', _rpc_trade_status=MagicMock(return_value=[{ 'trade_id': 1, 'pair': 'ETH/BTC', 'base_currency': 'BTC', 'open_date': arrow.utcnow(), 'close_date': None, 'open_rate': 1.099e-05, 'close_rate': None, 'current_rate': 1.098e-05, 'amount': 
90.99181074, 'stake_amount': 90.99181074, 'buy_tag': None, 'close_profit_pct': None, 'profit': -0.0059, 'profit_pct': -0.59, 'initial_stop_loss_abs': 1.098e-05, 'stop_loss_abs': 1.099e-05, 'sell_order_status': None, 'initial_stop_loss_pct': -0.05, 'stoploss_current_dist': 1e-08, 'stoploss_current_dist_pct': -0.02, 'stop_loss_pct': -0.01, 'open_order': '(limit buy rem=0.00000000)', 'is_open': True }]), ) telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) telegram._status(update=update, context=MagicMock()) assert msg_mock.call_count == 1 context = MagicMock() # /status table context.args = ["table"] telegram._status(update=update, context=context) assert status_table.call_count == 1 def test_status_handle(default_conf, update, ticker, fee, mocker) -> None: default_conf['max_open_trades'] = 3 mocker.patch.multiple( 'freqtrade.exchange.Exchange', fetch_ticker=ticker, get_fee=fee, _is_dry_limit_order_filled=MagicMock(return_value=True), ) status_table = MagicMock() mocker.patch.multiple( 'freqtrade.rpc.telegram.Telegram', _status_table=status_table, ) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) freqtradebot.state = State.STOPPED # Status is also enabled when stopped telegram._status(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert 'no active trade' in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() freqtradebot.state = State.RUNNING telegram._status(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert 'no active trade' in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() # Create some test data freqtradebot.enter_positions() # Trigger status while we have a fulfilled order for the open trade telegram._status(update=update, context=MagicMock()) # close_rate should not be included in the message as the trade is not closed # and no line should be empty lines = msg_mock.call_args_list[0][0][0].split('\n') assert '' not in lines 
assert 'Close Rate' not in ''.join(lines) assert 'Close Profit' not in ''.join(lines) assert msg_mock.call_count == 3 assert 'ETH/BTC' in msg_mock.call_args_list[0][0][0] assert 'LTC/BTC' in msg_mock.call_args_list[1][0][0] msg_mock.reset_mock() context = MagicMock() context.args = ["2", "3"] telegram._status(update=update, context=context) lines = msg_mock.call_args_list[0][0][0].split('\n') assert '' not in lines assert 'Close Rate' not in ''.join(lines) assert 'Close Profit' not in ''.join(lines) assert msg_mock.call_count == 2 assert 'LTC/BTC' in msg_mock.call_args_list[0][0][0] def test_status_table_handle(default_conf, update, ticker, fee, mocker) -> None: mocker.patch.multiple( 'freqtrade.exchange.Exchange', fetch_ticker=ticker, get_fee=fee, ) default_conf['stake_amount'] = 15.0 telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) freqtradebot.state = State.STOPPED # Status table is also enabled when stopped telegram._status_table(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert 'no active trade' in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() freqtradebot.state = State.RUNNING telegram._status_table(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert 'no active trade' in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() # Create some test data freqtradebot.enter_positions() telegram._status_table(update=update, context=MagicMock()) text = re.sub('</?pre>', '', msg_mock.call_args_list[-1][0][0]) line = text.split("\n") fields = re.sub('[ ]+', ' ', line[2].strip()).split(' ') assert int(fields[0]) == 1 assert 'ETH/BTC' in fields[1] assert msg_mock.call_count == 1 def test_daily_handle(default_conf, update, ticker, limit_buy_order, fee, limit_sell_order, mocker) -> None: default_conf['max_open_trades'] = 1 mocker.patch( 'freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', return_value=15000.0 ) mocker.patch.multiple( 
'freqtrade.exchange.Exchange', fetch_ticker=ticker, get_fee=fee, ) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) # Create some test data freqtradebot.enter_positions() trade = Trade.query.first() assert trade # Simulate fulfilled LIMIT_BUY order for trade trade.update(limit_buy_order) # Simulate fulfilled LIMIT_SELL order for trade trade.update(limit_sell_order) trade.close_date = datetime.utcnow() trade.is_open = False # Try valid data # /daily 2 context = MagicMock() context.args = ["2"] telegram._daily(update=update, context=context) assert msg_mock.call_count == 1 assert 'Daily' in msg_mock.call_args_list[0][0][0] assert str(datetime.utcnow().date()) in msg_mock.call_args_list[0][0][0] assert str(' 0.00006217 BTC') in msg_mock.call_args_list[0][0][0] assert str(' 0.933 USD') in msg_mock.call_args_list[0][0][0] assert str(' 1 trade') in msg_mock.call_args_list[0][0][0] assert str(' 0 trade') in msg_mock.call_args_list[0][0][0] # Reset msg_mock msg_mock.reset_mock() context.args = [] telegram._daily(update=update, context=context) assert msg_mock.call_count == 1 assert 'Daily' in msg_mock.call_args_list[0][0][0] assert str(datetime.utcnow().date()) in msg_mock.call_args_list[0][0][0] assert str(' 0.00006217 BTC') in msg_mock.call_args_list[0][0][0] assert str(' 0.933 USD') in msg_mock.call_args_list[0][0][0] assert str(' 1 trade') in msg_mock.call_args_list[0][0][0] assert str(' 0 trade') in msg_mock.call_args_list[0][0][0] # Reset msg_mock msg_mock.reset_mock() freqtradebot.config['max_open_trades'] = 2 # Add two other trades n = freqtradebot.enter_positions() assert n == 2 trades = Trade.query.all() for trade in trades: trade.update(limit_buy_order) trade.update(limit_sell_order) trade.close_date = datetime.utcnow() trade.is_open = False # /daily 1 context = MagicMock() context.args = ["1"] telegram._daily(update=update, context=context) assert str(' 0.00018651 BTC') in 
msg_mock.call_args_list[0][0][0] assert str(' 2.798 USD') in msg_mock.call_args_list[0][0][0] assert str(' 3 trades') in msg_mock.call_args_list[0][0][0] def test_daily_wrong_input(default_conf, update, ticker, mocker) -> None: mocker.patch.multiple( 'freqtrade.exchange.Exchange', fetch_ticker=ticker ) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) # Try invalid data msg_mock.reset_mock() freqtradebot.state = State.RUNNING # /daily -2 context = MagicMock() context.args = ["-2"] telegram._daily(update=update, context=context) assert msg_mock.call_count == 1 assert 'must be an integer greater than 0' in msg_mock.call_args_list[0][0][0] # Try invalid data msg_mock.reset_mock() freqtradebot.state = State.RUNNING # /daily today context = MagicMock() context.args = ["today"] telegram._daily(update=update, context=context) assert str('Daily Profit over the last 7 days') in msg_mock.call_args_list[0][0][0] def test_profit_handle(default_conf, update, ticker, ticker_sell_up, fee, limit_buy_order, limit_sell_order, mocker) -> None: mocker.patch('freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', return_value=15000.0) mocker.patch.multiple( 'freqtrade.exchange.Exchange', fetch_ticker=ticker, get_fee=fee, ) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) telegram._profit(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert 'No trades yet.' 
in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() # Create some test data freqtradebot.enter_positions() trade = Trade.query.first() # Simulate fulfilled LIMIT_BUY order for trade trade.update(limit_buy_order) context = MagicMock() # Test with invalid 2nd argument (should silently pass) context.args = ["aaa"] telegram._profit(update=update, context=context) assert msg_mock.call_count == 1 assert 'No closed trade' in msg_mock.call_args_list[-1][0][0] assert '*ROI:* All trades' in msg_mock.call_args_list[-1][0][0] mocker.patch('freqtrade.wallets.Wallets.get_starting_balance', return_value=0.01) assert ('∙ `-0.00000500 BTC (-0.50%) (-0.0 \N{GREEK CAPITAL LETTER SIGMA}%)`' in msg_mock.call_args_list[-1][0][0]) msg_mock.reset_mock() # Update the ticker with a market going up mocker.patch('freqtrade.exchange.Exchange.fetch_ticker', ticker_sell_up) trade.update(limit_sell_order) trade.close_date = datetime.utcnow() trade.is_open = False telegram._profit(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert '*ROI:* Closed trades' in msg_mock.call_args_list[-1][0][0] assert ('∙ `0.00006217 BTC (6.20%) (0.62 \N{GREEK CAPITAL LETTER SIGMA}%)`' in msg_mock.call_args_list[-1][0][0]) assert '∙ `0.933 USD`' in msg_mock.call_args_list[-1][0][0] assert '*ROI:* All trades' in msg_mock.call_args_list[-1][0][0] assert ('∙ `0.00006217 BTC (6.20%) (0.62 \N{GREEK CAPITAL LETTER SIGMA}%)`' in msg_mock.call_args_list[-1][0][0]) assert '∙ `0.933 USD`' in msg_mock.call_args_list[-1][0][0] assert '*Best Performing:* `ETH/BTC: 6.20%`' in msg_mock.call_args_list[-1][0][0] def test_telegram_stats(default_conf, update, ticker, ticker_sell_up, fee, limit_buy_order, limit_sell_order, mocker) -> None: mocker.patch('freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', return_value=15000.0) mocker.patch.multiple( 'freqtrade.exchange.Exchange', fetch_ticker=ticker, get_fee=fee, ) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) 
patch_get_signal(freqtradebot) telegram._stats(update=update, context=MagicMock()) assert msg_mock.call_count == 1 # assert 'No trades yet.' in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() # Create some test data create_mock_trades(fee) telegram._stats(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert 'Sell Reason' in msg_mock.call_args_list[-1][0][0] assert 'ROI' in msg_mock.call_args_list[-1][0][0] assert 'Avg. Duration' in msg_mock.call_args_list[-1][0][0] msg_mock.reset_mock() def test_telegram_balance_handle(default_conf, update, mocker, rpc_balance, tickers) -> None: default_conf['dry_run'] = False mocker.patch('freqtrade.exchange.Exchange.get_balances', return_value=rpc_balance) mocker.patch('freqtrade.exchange.Exchange.get_tickers', tickers) mocker.patch('freqtrade.exchange.Exchange.get_valid_pair_combination', side_effect=lambda a, b: f"{a}/{b}") telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) telegram._balance(update=update, context=MagicMock()) result = msg_mock.call_args_list[0][0][0] assert msg_mock.call_count == 1 assert '*BTC:*' in result assert '*ETH:*' not in result assert '*USDT:*' not in result assert '*EUR:*' not in result assert '*LTC:*' in result assert '*XRP:*' not in result assert 'Balance:' in result assert 'Est. BTC:' in result assert 'BTC: 12.00000000' in result assert "*3 Other Currencies (< 0.0001 BTC):*" in result assert 'BTC: 0.00000309' in result def test_balance_handle_empty_response(default_conf, update, mocker) -> None: default_conf['dry_run'] = False mocker.patch('freqtrade.exchange.Exchange.get_balances', return_value={}) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) freqtradebot.config['dry_run'] = False telegram._balance(update=update, context=MagicMock()) result = msg_mock.call_args_list[0][0][0] assert msg_mock.call_count == 1 assert 'All balances are zero.' 
in result def test_balance_handle_empty_response_dry(default_conf, update, mocker) -> None: mocker.patch('freqtrade.exchange.Exchange.get_balances', return_value={}) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) telegram._balance(update=update, context=MagicMock()) result = msg_mock.call_args_list[0][0][0] assert msg_mock.call_count == 1 assert "*Warning:* Simulated balances in Dry Mode." in result assert "Starting capital: `1000` BTC" in result def test_balance_handle_too_large_response(default_conf, update, mocker) -> None: balances = [] for i in range(100): curr = choice(ascii_uppercase) + choice(ascii_uppercase) + choice(ascii_uppercase) balances.append({ 'currency': curr, 'free': 1.0, 'used': 0.5, 'balance': i, 'est_stake': 1, 'stake': 'BTC', }) mocker.patch('freqtrade.rpc.rpc.RPC._rpc_balance', return_value={ 'currencies': balances, 'total': 100.0, 'symbol': 100.0, 'value': 1000.0, 'starting_capital': 1000, 'starting_capital_fiat': 1000, }) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) telegram._balance(update=update, context=MagicMock()) assert msg_mock.call_count > 1 # Test if wrap happens around 4000 - # and each single currency-output is around 120 characters long so we need # an offset to avoid random test failures assert len(msg_mock.call_args_list[0][0][0]) < 4096 assert len(msg_mock.call_args_list[0][0][0]) > (4096 - 120) def test_start_handle(default_conf, update, mocker) -> None: telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) freqtradebot.state = State.STOPPED assert freqtradebot.state == State.STOPPED telegram._start(update=update, context=MagicMock()) assert freqtradebot.state == State.RUNNING assert msg_mock.call_count == 1 def test_start_handle_already_running(default_conf, update, mocker) -> None: telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) 
freqtradebot.state = State.RUNNING assert freqtradebot.state == State.RUNNING telegram._start(update=update, context=MagicMock()) assert freqtradebot.state == State.RUNNING assert msg_mock.call_count == 1 assert 'already running' in msg_mock.call_args_list[0][0][0] def test_stop_handle(default_conf, update, mocker) -> None: telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) freqtradebot.state = State.RUNNING assert freqtradebot.state == State.RUNNING telegram._stop(update=update, context=MagicMock()) assert freqtradebot.state == State.STOPPED assert msg_mock.call_count == 1 assert 'stopping trader' in msg_mock.call_args_list[0][0][0] def test_stop_handle_already_stopped(default_conf, update, mocker) -> None: telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) freqtradebot.state = State.STOPPED assert freqtradebot.state == State.STOPPED telegram._stop(update=update, context=MagicMock()) assert freqtradebot.state == State.STOPPED assert msg_mock.call_count == 1 assert 'already stopped' in msg_mock.call_args_list[0][0][0] def test_stopbuy_handle(default_conf, update, mocker) -> None: telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) assert freqtradebot.config['max_open_trades'] != 0 telegram._stopbuy(update=update, context=MagicMock()) assert freqtradebot.config['max_open_trades'] == 0 assert msg_mock.call_count == 1 assert 'No more buy will occur from now. Run /reload_config to reset.' 
\ in msg_mock.call_args_list[0][0][0] def test_reload_config_handle(default_conf, update, mocker) -> None: telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) freqtradebot.state = State.RUNNING assert freqtradebot.state == State.RUNNING telegram._reload_config(update=update, context=MagicMock()) assert freqtradebot.state == State.RELOAD_CONFIG assert msg_mock.call_count == 1 assert 'Reloading config' in msg_mock.call_args_list[0][0][0] def test_telegram_forcesell_handle(default_conf, update, ticker, fee, ticker_sell_up, mocker) -> None: mocker.patch('freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', return_value=15000.0) msg_mock = mocker.patch('freqtrade.rpc.telegram.Telegram.send_msg', MagicMock()) mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock()) patch_exchange(mocker) patch_whitelist(mocker, default_conf) mocker.patch.multiple( 'freqtrade.exchange.Exchange', fetch_ticker=ticker, get_fee=fee, _is_dry_limit_order_filled=MagicMock(return_value=True), ) freqtradebot = FreqtradeBot(default_conf) rpc = RPC(freqtradebot) telegram = Telegram(rpc, default_conf) patch_get_signal(freqtradebot) # Create some test data freqtradebot.enter_positions() trade = Trade.query.first() assert trade # Increase the price and sell it mocker.patch('freqtrade.exchange.Exchange.fetch_ticker', ticker_sell_up) # /forcesell 1 context = MagicMock() context.args = ["1"] telegram._forcesell(update=update, context=context) assert msg_mock.call_count == 4 last_msg = msg_mock.call_args_list[-1][0][0] assert { 'type': RPCMessageType.SELL, 'trade_id': 1, 'exchange': 'Binance', 'pair': 'ETH/BTC', 'gain': 'profit', 'limit': 1.173e-05, 'amount': 91.07468123, 'order_type': 'limit', 'open_rate': 1.098e-05, 'current_rate': 1.173e-05, 'profit_amount': 6.314e-05, 'profit_ratio': 0.0629778, 'stake_currency': 'BTC', 'fiat_currency': 'USD', 'sell_reason': SellType.FORCE_SELL.value, 'open_date': ANY, 'close_date': ANY, 'close_rate': ANY, } == last_msg def 
test_telegram_forcesell_down_handle(default_conf, update, ticker, fee, ticker_sell_down, mocker) -> None: mocker.patch('freqtrade.rpc.fiat_convert.CryptoToFiatConverter._find_price', return_value=15000.0) msg_mock = mocker.patch('freqtrade.rpc.telegram.Telegram.send_msg', MagicMock()) mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock()) patch_exchange(mocker) patch_whitelist(mocker, default_conf) mocker.patch.multiple( 'freqtrade.exchange.Exchange', fetch_ticker=ticker, get_fee=fee, _is_dry_limit_order_filled=MagicMock(return_value=True), ) freqtradebot = FreqtradeBot(default_conf) rpc = RPC(freqtradebot) telegram = Telegram(rpc, default_conf) patch_get_signal(freqtradebot) # Create some test data freqtradebot.enter_positions() # Decrease the price and sell it mocker.patch.multiple( 'freqtrade.exchange.Exchange', fetch_ticker=ticker_sell_down ) trade = Trade.query.first() assert trade # /forcesell 1 context = MagicMock() context.args = ["1"] telegram._forcesell(update=update, context=context) assert msg_mock.call_count == 4 last_msg = msg_mock.call_args_list[-1][0][0] assert { 'type': RPCMessageType.SELL, 'trade_id': 1, 'exchange': 'Binance', 'pair': 'ETH/BTC', 'gain': 'loss', 'limit': 1.043e-05, 'amount': 91.07468123, 'order_type': 'limit', 'open_rate': 1.098e-05, 'current_rate': 1.043e-05, 'profit_amount': -5.497e-05, 'profit_ratio': -0.05482878, 'stake_currency': 'BTC', 'fiat_currency': 'USD', 'sell_reason': SellType.FORCE_SELL.value, 'open_date': ANY, 'close_date': ANY, 'close_rate': ANY, } == last_msg def test_forcesell_all_handle(default_conf, update, ticker, fee, mocker) -> None: patch_exchange(mocker) mocker.patch('freqtrade.rpc.fiat_convert.CryptoToFiatConverter._find_price', return_value=15000.0) msg_mock = mocker.patch('freqtrade.rpc.telegram.Telegram.send_msg', MagicMock()) mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock()) patch_whitelist(mocker, default_conf) mocker.patch.multiple( 'freqtrade.exchange.Exchange', 
fetch_ticker=ticker, get_fee=fee, _is_dry_limit_order_filled=MagicMock(return_value=True), ) default_conf['max_open_trades'] = 4 freqtradebot = FreqtradeBot(default_conf) rpc = RPC(freqtradebot) telegram = Telegram(rpc, default_conf) patch_get_signal(freqtradebot) # Create some test data freqtradebot.enter_positions() msg_mock.reset_mock() # /forcesell all context = MagicMock() context.args = ["all"] telegram._forcesell(update=update, context=context) # Called for each trade 2 times assert msg_mock.call_count == 8 msg = msg_mock.call_args_list[1][0][0] assert { 'type': RPCMessageType.SELL, 'trade_id': 1, 'exchange': 'Binance', 'pair': 'ETH/BTC', 'gain': 'loss', 'limit': 1.099e-05, 'amount': 91.07468123, 'order_type': 'limit', 'open_rate': 1.098e-05, 'current_rate': 1.099e-05, 'profit_amount': -4.09e-06, 'profit_ratio': -0.00408133, 'stake_currency': 'BTC', 'fiat_currency': 'USD', 'sell_reason': SellType.FORCE_SELL.value, 'open_date': ANY, 'close_date': ANY, 'close_rate': ANY, } == msg def test_forcesell_handle_invalid(default_conf, update, mocker) -> None: mocker.patch('freqtrade.rpc.fiat_convert.CryptoToFiatConverter._find_price', return_value=15000.0) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) # Trader is not running freqtradebot.state = State.STOPPED # /forcesell 1 context = MagicMock() context.args = ["1"] telegram._forcesell(update=update, context=context) assert msg_mock.call_count == 1 assert 'not running' in msg_mock.call_args_list[0][0][0] # No argument msg_mock.reset_mock() freqtradebot.state = State.RUNNING context = MagicMock() context.args = [] telegram._forcesell(update=update, context=context) assert msg_mock.call_count == 1 assert "You must specify a trade-id or 'all'." 
in msg_mock.call_args_list[0][0][0] # Invalid argument msg_mock.reset_mock() freqtradebot.state = State.RUNNING # /forcesell 123456 context = MagicMock() context.args = ["123456"] telegram._forcesell(update=update, context=context) assert msg_mock.call_count == 1 assert 'invalid argument' in msg_mock.call_args_list[0][0][0] def test_forcebuy_handle(default_conf, update, mocker) -> None: mocker.patch('freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', return_value=15000.0) fbuy_mock = MagicMock(return_value=None) mocker.patch('freqtrade.rpc.RPC._rpc_forcebuy', fbuy_mock) telegram, freqtradebot, _ = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) # /forcebuy ETH/BTC context = MagicMock() context.args = ["ETH/BTC"] telegram._forcebuy(update=update, context=context) assert fbuy_mock.call_count == 1 assert fbuy_mock.call_args_list[0][0][0] == 'ETH/BTC' assert fbuy_mock.call_args_list[0][0][1] is None # Reset and retry with specified price fbuy_mock = MagicMock(return_value=None) mocker.patch('freqtrade.rpc.RPC._rpc_forcebuy', fbuy_mock) # /forcebuy ETH/BTC 0.055 context = MagicMock() context.args = ["ETH/BTC", "0.055"] telegram._forcebuy(update=update, context=context) assert fbuy_mock.call_count == 1 assert fbuy_mock.call_args_list[0][0][0] == 'ETH/BTC' assert isinstance(fbuy_mock.call_args_list[0][0][1], float) assert fbuy_mock.call_args_list[0][0][1] == 0.055 def test_forcebuy_handle_exception(default_conf, update, mocker) -> None: mocker.patch('freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', return_value=15000.0) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) update.message.text = '/forcebuy ETH/Nonepair' telegram._forcebuy(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert msg_mock.call_args_list[0][0][0] == 'Forcebuy not enabled.' 
def test_forcebuy_no_pair(default_conf, update, mocker) -> None: mocker.patch('freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', return_value=15000.0) fbuy_mock = MagicMock(return_value=None) mocker.patch('freqtrade.rpc.RPC._rpc_forcebuy', fbuy_mock) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) context = MagicMock() context.args = [] telegram._forcebuy(update=update, context=context) assert fbuy_mock.call_count == 0 assert msg_mock.call_count == 1 assert msg_mock.call_args_list[0][1]['msg'] == 'Which pair?' # assert msg_mock.call_args_list[0][1]['callback_query_handler'] == 'forcebuy' keyboard = msg_mock.call_args_list[0][1]['keyboard'] assert reduce(lambda acc, x: acc + len(x), keyboard, 0) == 4 update = MagicMock() update.callback_query = MagicMock() update.callback_query.data = 'XRP/USDT' telegram._forcebuy_inline(update, None) assert fbuy_mock.call_count == 1 def test_performance_handle(default_conf, update, ticker, fee, limit_buy_order, limit_sell_order, mocker) -> None: mocker.patch.multiple( 'freqtrade.exchange.Exchange', fetch_ticker=ticker, get_fee=fee, ) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) # Create some test data freqtradebot.enter_positions() trade = Trade.query.first() assert trade # Simulate fulfilled LIMIT_BUY order for trade trade.update(limit_buy_order) # Simulate fulfilled LIMIT_SELL order for trade trade.update(limit_sell_order) trade.close_date = datetime.utcnow() trade.is_open = False telegram._performance(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert 'Performance' in msg_mock.call_args_list[0][0][0] assert '<code>ETH/BTC\t0.00006217 BTC (6.20%) (1)</code>' in msg_mock.call_args_list[0][0][0] def test_count_handle(default_conf, update, ticker, fee, mocker) -> None: mocker.patch.multiple( 'freqtrade.exchange.Exchange', fetch_ticker=ticker, get_fee=fee, ) telegram, 
freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) freqtradebot.state = State.STOPPED telegram._count(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert 'not running' in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() freqtradebot.state = State.RUNNING # Create some test data freqtradebot.enter_positions() msg_mock.reset_mock() telegram._count(update=update, context=MagicMock()) msg = ('<pre> current max total stake\n--------- ----- -------------\n' ' 1 {} {}</pre>').format( default_conf['max_open_trades'], default_conf['stake_amount'] ) assert msg in msg_mock.call_args_list[0][0][0] def test_telegram_lock_handle(default_conf, update, ticker, fee, mocker) -> None: mocker.patch.multiple( 'freqtrade.exchange.Exchange', fetch_ticker=ticker, get_fee=fee, ) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) telegram._locks(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert 'No active locks.' 
in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() PairLocks.lock_pair('ETH/BTC', arrow.utcnow().shift(minutes=4).datetime, 'randreason') PairLocks.lock_pair('XRP/BTC', arrow.utcnow().shift(minutes=20).datetime, 'deadbeef') telegram._locks(update=update, context=MagicMock()) assert 'Pair' in msg_mock.call_args_list[0][0][0] assert 'Until' in msg_mock.call_args_list[0][0][0] assert 'Reason\n' in msg_mock.call_args_list[0][0][0] assert 'ETH/BTC' in msg_mock.call_args_list[0][0][0] assert 'XRP/BTC' in msg_mock.call_args_list[0][0][0] assert 'deadbeef' in msg_mock.call_args_list[0][0][0] assert 'randreason' in msg_mock.call_args_list[0][0][0] context = MagicMock() context.args = ['XRP/BTC'] msg_mock.reset_mock() telegram._delete_locks(update=update, context=context) assert 'ETH/BTC' in msg_mock.call_args_list[0][0][0] assert 'randreason' in msg_mock.call_args_list[0][0][0] assert 'XRP/BTC' not in msg_mock.call_args_list[0][0][0] assert 'deadbeef' not in msg_mock.call_args_list[0][0][0] def test_whitelist_static(default_conf, update, mocker) -> None: telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) telegram._whitelist(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert ("Using whitelist `['StaticPairList']` with 4 pairs\n" "`ETH/BTC, LTC/BTC, XRP/BTC, NEO/BTC`" in msg_mock.call_args_list[0][0][0]) def test_whitelist_dynamic(default_conf, update, mocker) -> None: mocker.patch('freqtrade.exchange.Exchange.exchange_has', MagicMock(return_value=True)) default_conf['pairlists'] = [{'method': 'VolumePairList', 'number_assets': 4 }] telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) telegram._whitelist(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert ("Using whitelist `['VolumePairList']` with 4 pairs\n" "`ETH/BTC, LTC/BTC, XRP/BTC, NEO/BTC`" in msg_mock.call_args_list[0][0][0]) def test_blacklist_static(default_conf, update, mocker) -> None: telegram, freqtradebot, 
msg_mock = get_telegram_testobject(mocker, default_conf) telegram._blacklist(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert ("Blacklist contains 2 pairs\n`DOGE/BTC, HOT/BTC`" in msg_mock.call_args_list[0][0][0]) msg_mock.reset_mock() # /blacklist ETH/BTC context = MagicMock() context.args = ["ETH/BTC"] telegram._blacklist(update=update, context=context) assert msg_mock.call_count == 1 assert ("Blacklist contains 3 pairs\n`DOGE/BTC, HOT/BTC, ETH/BTC`" in msg_mock.call_args_list[0][0][0]) assert freqtradebot.pairlists.blacklist == ["DOGE/BTC", "HOT/BTC", "ETH/BTC"] msg_mock.reset_mock() context = MagicMock() context.args = ["XRP/.*"] telegram._blacklist(update=update, context=context) assert msg_mock.call_count == 1 assert ("Blacklist contains 4 pairs\n`DOGE/BTC, HOT/BTC, ETH/BTC, XRP/.*`" in msg_mock.call_args_list[0][0][0]) assert freqtradebot.pairlists.blacklist == ["DOGE/BTC", "HOT/BTC", "ETH/BTC", "XRP/.*"] def test_telegram_logs(default_conf, update, mocker) -> None: mocker.patch.multiple( 'freqtrade.rpc.telegram.Telegram', _init=MagicMock(), ) setup_logging(default_conf) telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) context = MagicMock() context.args = [] telegram._logs(update=update, context=context) assert msg_mock.call_count == 1 assert "freqtrade\\.rpc\\.telegram" in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() context.args = ["1"] telegram._logs(update=update, context=context) assert msg_mock.call_count == 1 msg_mock.reset_mock() # Test with changed MaxMessageLength mocker.patch('freqtrade.rpc.telegram.MAX_TELEGRAM_MESSAGE_LENGTH', 200) context = MagicMock() context.args = [] telegram._logs(update=update, context=context) # Called at least 2 times. Exact times will change with unrelated changes to setup messages # Therefore we don't test for this explicitly. 
assert msg_mock.call_count >= 2 def test_edge_disabled(default_conf, update, mocker) -> None: telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) telegram._edge(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert "Edge is not enabled." in msg_mock.call_args_list[0][0][0] def test_edge_enabled(edge_conf, update, mocker) -> None: mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock( return_value={ 'E/F': PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60), } )) telegram, _, msg_mock = get_telegram_testobject(mocker, edge_conf) telegram._edge(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert '<b>Edge only validated following pairs:</b>\n<pre>' in msg_mock.call_args_list[0][0][0] assert 'Pair Winrate Expectancy Stoploss' in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock( return_value={})) telegram._edge(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert '<b>Edge only validated following pairs:</b>' in msg_mock.call_args_list[0][0][0] assert 'Winrate' not in msg_mock.call_args_list[0][0][0] def test_telegram_trades(mocker, update, default_conf, fee): telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) context = MagicMock() context.args = [] telegram._trades(update=update, context=context) assert "<b>0 recent trades</b>:" in msg_mock.call_args_list[0][0][0] assert "<pre>" not in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() context.args = ['hello'] telegram._trades(update=update, context=context) assert "<b>0 recent trades</b>:" in msg_mock.call_args_list[0][0][0] assert "<pre>" not in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() create_mock_trades(fee) context = MagicMock() context.args = [5] telegram._trades(update=update, context=context) msg_mock.call_count == 1 assert "2 recent trades</b>:" in msg_mock.call_args_list[0][0][0] assert "Profit (" in 
msg_mock.call_args_list[0][0][0] assert "Close Date" in msg_mock.call_args_list[0][0][0] assert "<pre>" in msg_mock.call_args_list[0][0][0] assert bool(re.search(r"just now[ ]*XRP\/BTC \(#3\) 1.00% \(", msg_mock.call_args_list[0][0][0])) def test_telegram_delete_trade(mocker, update, default_conf, fee): telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) context = MagicMock() context.args = [] telegram._delete_trade(update=update, context=context) assert "Trade-id not set." in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() create_mock_trades(fee) context = MagicMock() context.args = [1] telegram._delete_trade(update=update, context=context) msg_mock.call_count == 1 assert "Deleted trade 1." in msg_mock.call_args_list[0][0][0] assert "Please make sure to take care of this asset" in msg_mock.call_args_list[0][0][0] def test_help_handle(default_conf, update, mocker) -> None: telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) telegram._help(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert '*/help:* `This help message`' in msg_mock.call_args_list[0][0][0] def test_version_handle(default_conf, update, mocker) -> None: telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) telegram._version(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert '*Version:* `{}`'.format(__version__) in msg_mock.call_args_list[0][0][0] def test_show_config_handle(default_conf, update, mocker) -> None: default_conf['runmode'] = RunMode.DRY_RUN telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) telegram._show_config(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert '*Mode:* `{}`'.format('Dry-run') in msg_mock.call_args_list[0][0][0] assert '*Exchange:* `binance`' in msg_mock.call_args_list[0][0][0] assert '*Strategy:* `StrategyTestV2`' in msg_mock.call_args_list[0][0][0] assert '*Stoploss:* `-0.1`' in msg_mock.call_args_list[0][0][0] 
msg_mock.reset_mock() freqtradebot.config['trailing_stop'] = True telegram._show_config(update=update, context=MagicMock()) assert msg_mock.call_count == 1 assert '*Mode:* `{}`'.format('Dry-run') in msg_mock.call_args_list[0][0][0] assert '*Exchange:* `binance`' in msg_mock.call_args_list[0][0][0] assert '*Strategy:* `StrategyTestV2`' in msg_mock.call_args_list[0][0][0] assert '*Initial Stoploss:* `-0.1`' in msg_mock.call_args_list[0][0][0] def test_send_msg_buy_notification(default_conf, mocker, caplog) -> None: msg = { 'type': RPCMessageType.BUY, 'trade_id': 1, 'buy_tag': 'buy_signal_01', 'exchange': 'Binance', 'pair': 'ETH/BTC', 'limit': 1.099e-05, 'order_type': 'limit', 'stake_amount': 0.001, 'stake_amount_fiat': 0.0, 'stake_currency': 'BTC', 'fiat_currency': 'USD', 'current_rate': 1.099e-05, 'amount': 1333.3333333333335, 'open_date': arrow.utcnow().shift(hours=-1) } telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) telegram.send_msg(msg) assert msg_mock.call_args[0][0] \ == '\N{LARGE BLUE CIRCLE} *Binance:* Buying ETH/BTC (#1)\n' \ '*Buy Tag:* `buy_signal_01`\n' \ '*Amount:* `1333.33333333`\n' \ '*Open Rate:* `0.00001099`\n' \ '*Current Rate:* `0.00001099`\n' \ '*Total:* `(0.00100000 BTC, 12.345 USD)`' freqtradebot.config['telegram']['notification_settings'] = {'buy': 'off'} caplog.clear() msg_mock.reset_mock() telegram.send_msg(msg) msg_mock.call_count == 0 log_has("Notification 'buy' not sent.", caplog) freqtradebot.config['telegram']['notification_settings'] = {'buy': 'silent'} caplog.clear() msg_mock.reset_mock() telegram.send_msg(msg) msg_mock.call_count == 1 msg_mock.call_args_list[0][1]['disable_notification'] is True def test_send_msg_buy_cancel_notification(default_conf, mocker) -> None: telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) telegram.send_msg({ 'type': RPCMessageType.BUY_CANCEL, 'buy_tag': 'buy_signal_01', 'trade_id': 1, 'exchange': 'Binance', 'pair': 'ETH/BTC', 'reason': 
CANCEL_REASON['TIMEOUT'] }) assert (msg_mock.call_args[0][0] == '\N{WARNING SIGN} *Binance:* ' 'Cancelling open buy Order for ETH/BTC (#1). ' 'Reason: cancelled due to timeout.') def test_send_msg_buy_fill_notification(default_conf, mocker) -> None: default_conf['telegram']['notification_settings']['buy_fill'] = 'on' telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) telegram.send_msg({ 'type': RPCMessageType.BUY_FILL, 'buy_tag': 'buy_signal_01', 'trade_id': 1, 'exchange': 'Binance', 'pair': 'ETH/USDT', 'open_rate': 200, 'stake_amount': 100, 'amount': 0.5, 'open_date': arrow.utcnow().datetime }) assert (msg_mock.call_args[0][0] == '\N{LARGE CIRCLE} *Binance:* ' 'Buy order for ETH/USDT (#1) filled for 200.') def test_send_msg_sell_notification(default_conf, mocker) -> None: telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) old_convamount = telegram._rpc._fiat_converter.convert_amount telegram._rpc._fiat_converter.convert_amount = lambda a, b, c: -24.812 telegram.send_msg({ 'type': RPCMessageType.SELL, 'trade_id': 1, 'exchange': 'Binance', 'pair': 'KEY/ETH', 'gain': 'loss', 'limit': 3.201e-05, 'amount': 1333.3333333333335, 'order_type': 'market', 'open_rate': 7.5e-05, 'current_rate': 3.201e-05, 'profit_amount': -0.05746268, 'profit_ratio': -0.57405275, 'stake_currency': 'ETH', 'fiat_currency': 'USD', 'sell_reason': SellType.STOP_LOSS.value, 'open_date': arrow.utcnow().shift(hours=-1), 'close_date': arrow.utcnow(), }) assert msg_mock.call_args[0][0] \ == ('\N{WARNING SIGN} *Binance:* Selling KEY/ETH (#1)\n' '*Profit:* `-57.41% (loss: -0.05746268 ETH / -24.812 USD)`\n' '*Sell Reason:* `stop_loss`\n' '*Duration:* `1:00:00 (60.0 min)`\n' '*Amount:* `1333.33333333`\n' '*Open Rate:* `0.00007500`\n' '*Current Rate:* `0.00003201`\n' '*Close Rate:* `0.00003201`' ) msg_mock.reset_mock() telegram.send_msg({ 'type': RPCMessageType.SELL, 'trade_id': 1, 'exchange': 'Binance', 'pair': 'KEY/ETH', 'gain': 'loss', 'limit': 3.201e-05, 'amount': 
1333.3333333333335, 'order_type': 'market', 'open_rate': 7.5e-05, 'current_rate': 3.201e-05, 'profit_amount': -0.05746268, 'profit_ratio': -0.57405275, 'stake_currency': 'ETH', 'sell_reason': SellType.STOP_LOSS.value, 'open_date': arrow.utcnow().shift(days=-1, hours=-2, minutes=-30), 'close_date': arrow.utcnow(), }) assert msg_mock.call_args[0][0] \ == ('\N{WARNING SIGN} *Binance:* Selling KEY/ETH (#1)\n' '*Profit:* `-57.41%`\n' '*Sell Reason:* `stop_loss`\n' '*Duration:* `1 day, 2:30:00 (1590.0 min)`\n' '*Amount:* `1333.33333333`\n' '*Open Rate:* `0.00007500`\n' '*Current Rate:* `0.00003201`\n' '*Close Rate:* `0.00003201`' ) # Reset singleton function to avoid random breaks telegram._rpc._fiat_converter.convert_amount = old_convamount def test_send_msg_sell_cancel_notification(default_conf, mocker) -> None: telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) old_convamount = telegram._rpc._fiat_converter.convert_amount telegram._rpc._fiat_converter.convert_amount = lambda a, b, c: -24.812 telegram.send_msg({ 'type': RPCMessageType.SELL_CANCEL, 'trade_id': 1, 'exchange': 'Binance', 'pair': 'KEY/ETH', 'reason': 'Cancelled on exchange' }) assert msg_mock.call_args[0][0] \ == ('\N{WARNING SIGN} *Binance:* Cancelling open sell Order for KEY/ETH (#1).' ' Reason: Cancelled on exchange.') msg_mock.reset_mock() telegram.send_msg({ 'type': RPCMessageType.SELL_CANCEL, 'trade_id': 1, 'exchange': 'Binance', 'pair': 'KEY/ETH', 'reason': 'timeout' }) assert msg_mock.call_args[0][0] \ == ('\N{WARNING SIGN} *Binance:* Cancelling open sell Order for KEY/ETH (#1).' 
' Reason: timeout.') # Reset singleton function to avoid random breaks telegram._rpc._fiat_converter.convert_amount = old_convamount def test_send_msg_sell_fill_notification(default_conf, mocker) -> None: default_conf['telegram']['notification_settings']['sell_fill'] = 'on' telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) telegram.send_msg({ 'type': RPCMessageType.SELL_FILL, 'trade_id': 1, 'exchange': 'Binance', 'pair': 'ETH/USDT', 'gain': 'loss', 'limit': 3.201e-05, 'amount': 0.1, 'order_type': 'market', 'open_rate': 500, 'close_rate': 550, 'current_rate': 3.201e-05, 'profit_amount': -0.05746268, 'profit_ratio': -0.57405275, 'stake_currency': 'ETH', 'fiat_currency': 'USD', 'sell_reason': SellType.STOP_LOSS.value, 'open_date': arrow.utcnow().shift(hours=-1), 'close_date': arrow.utcnow(), }) assert msg_mock.call_args[0][0] \ == ('\N{LARGE CIRCLE} *Binance:* Sell order for ETH/USDT (#1) filled for 550.') def test_send_msg_status_notification(default_conf, mocker) -> None: telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) telegram.send_msg({ 'type': RPCMessageType.STATUS, 'status': 'running' }) assert msg_mock.call_args[0][0] == '*Status:* `running`' def test_warning_notification(default_conf, mocker) -> None: telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) telegram.send_msg({ 'type': RPCMessageType.WARNING, 'status': 'message' }) assert msg_mock.call_args[0][0] == '\N{WARNING SIGN} *Warning:* `message`' def test_startup_notification(default_conf, mocker) -> None: telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) telegram.send_msg({ 'type': RPCMessageType.STARTUP, 'status': '*Custom:* `Hello World`' }) assert msg_mock.call_args[0][0] == '*Custom:* `Hello World`' def test_send_msg_unknown_type(default_conf, mocker) -> None: telegram, _, _ = get_telegram_testobject(mocker, default_conf) with pytest.raises(NotImplementedError, match=r'Unknown message type: None'): telegram.send_msg({ 
'type': None, }) def test_send_msg_buy_notification_no_fiat(default_conf, mocker) -> None: del default_conf['fiat_display_currency'] telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) telegram.send_msg({ 'type': RPCMessageType.BUY, 'buy_tag': 'buy_signal_01', 'trade_id': 1, 'exchange': 'Binance', 'pair': 'ETH/BTC', 'limit': 1.099e-05, 'order_type': 'limit', 'stake_amount': 0.001, 'stake_amount_fiat': 0.0, 'stake_currency': 'BTC', 'fiat_currency': None, 'current_rate': 1.099e-05, 'amount': 1333.3333333333335, 'open_date': arrow.utcnow().shift(hours=-1) }) assert msg_mock.call_args[0][0] == ('\N{LARGE BLUE CIRCLE} *Binance:* Buying ETH/BTC (#1)\n' '*Buy Tag:* `buy_signal_01`\n' '*Amount:* `1333.33333333`\n' '*Open Rate:* `0.00001099`\n' '*Current Rate:* `0.00001099`\n' '*Total:* `(0.00100000 BTC)`') def test_send_msg_sell_notification_no_fiat(default_conf, mocker) -> None: del default_conf['fiat_display_currency'] telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) telegram.send_msg({ 'type': RPCMessageType.SELL, 'trade_id': 1, 'exchange': 'Binance', 'pair': 'KEY/ETH', 'gain': 'loss', 'limit': 3.201e-05, 'amount': 1333.3333333333335, 'order_type': 'limit', 'open_rate': 7.5e-05, 'current_rate': 3.201e-05, 'profit_amount': -0.05746268, 'profit_ratio': -0.57405275, 'stake_currency': 'ETH', 'fiat_currency': 'USD', 'sell_reason': SellType.STOP_LOSS.value, 'open_date': arrow.utcnow().shift(hours=-2, minutes=-35, seconds=-3), 'close_date': arrow.utcnow(), }) assert msg_mock.call_args[0][0] == ('\N{WARNING SIGN} *Binance:* Selling KEY/ETH (#1)\n' '*Profit:* `-57.41%`\n' '*Sell Reason:* `stop_loss`\n' '*Duration:* `2:35:03 (155.1 min)`\n' '*Amount:* `1333.33333333`\n' '*Open Rate:* `0.00007500`\n' '*Current Rate:* `0.00003201`\n' '*Close Rate:* `0.00003201`' ) @pytest.mark.parametrize('msg,expected', [ ({'profit_percent': 20.1, 'sell_reason': 'roi'}, "\N{ROCKET}"), ({'profit_percent': 5.1, 'sell_reason': 'roi'}, "\N{ROCKET}"), 
({'profit_percent': 2.56, 'sell_reason': 'roi'}, "\N{EIGHT SPOKED ASTERISK}"), ({'profit_percent': 1.0, 'sell_reason': 'roi'}, "\N{EIGHT SPOKED ASTERISK}"), ({'profit_percent': 0.0, 'sell_reason': 'roi'}, "\N{EIGHT SPOKED ASTERISK}"), ({'profit_percent': -5.0, 'sell_reason': 'stop_loss'}, "\N{WARNING SIGN}"), ({'profit_percent': -2.0, 'sell_reason': 'sell_signal'}, "\N{CROSS MARK}"), ]) def test__sell_emoji(default_conf, mocker, msg, expected): del default_conf['fiat_display_currency'] telegram, _, _ = get_telegram_testobject(mocker, default_conf) assert telegram._get_sell_emoji(msg) == expected def test_telegram__send_msg(default_conf, mocker, caplog) -> None: mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock()) bot = MagicMock() telegram, _, _ = get_telegram_testobject(mocker, default_conf, mock=False) telegram._updater = MagicMock() telegram._updater.bot = bot telegram._config['telegram']['enabled'] = True telegram._send_msg('test') assert len(bot.method_calls) == 1 # Test update query = MagicMock() telegram._send_msg('test', callback_path="DeadBeef", query=query, reload_able=True) edit_message_text = telegram._updater.bot.edit_message_text assert edit_message_text.call_count == 1 assert "Updated: " in edit_message_text.call_args_list[0][1]['text'] telegram._updater.bot.edit_message_text = MagicMock(side_effect=BadRequest("not modified")) telegram._send_msg('test', callback_path="DeadBeef", query=query) assert telegram._updater.bot.edit_message_text.call_count == 1 assert not log_has_re(r"TelegramError: .*", caplog) telegram._updater.bot.edit_message_text = MagicMock(side_effect=BadRequest("")) telegram._send_msg('test2', callback_path="DeadBeef", query=query) assert telegram._updater.bot.edit_message_text.call_count == 1 assert log_has_re(r"TelegramError: .*", caplog) telegram._updater.bot.edit_message_text = MagicMock(side_effect=TelegramError("DeadBEEF")) telegram._send_msg('test3', callback_path="DeadBeef", query=query) assert 
log_has_re(r"TelegramError: DeadBEEF! Giving up.*", caplog) def test__send_msg_network_error(default_conf, mocker, caplog) -> None: mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock()) bot = MagicMock() bot.send_message = MagicMock(side_effect=NetworkError('Oh snap')) telegram, _, _ = get_telegram_testobject(mocker, default_conf, mock=False) telegram._updater = MagicMock() telegram._updater.bot = bot telegram._config['telegram']['enabled'] = True telegram._send_msg('test') # Bot should've tried to send it twice assert len(bot.method_calls) == 2 assert log_has('Telegram NetworkError: Oh snap! Trying one more time.', caplog) def test__send_msg_keyboard(default_conf, mocker, caplog) -> None: mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock()) bot = MagicMock() bot.send_message = MagicMock() freqtradebot = get_patched_freqtradebot(mocker, default_conf) rpc = RPC(freqtradebot) invalid_keys_list = [['/not_valid', '/profit'], ['/daily'], ['/alsoinvalid']] default_keys_list = [['/daily', '/profit', '/balance'], ['/status', '/status table', '/performance'], ['/count', '/start', '/stop', '/help']] default_keyboard = ReplyKeyboardMarkup(default_keys_list) custom_keys_list = [['/daily', '/stats', '/balance', '/profit', '/profit 5'], ['/count', '/start', '/reload_config', '/help']] custom_keyboard = ReplyKeyboardMarkup(custom_keys_list) def init_telegram(freqtradebot): telegram = Telegram(rpc, default_conf) telegram._updater = MagicMock() telegram._updater.bot = bot return telegram # no keyboard in config -> default keyboard freqtradebot.config['telegram']['enabled'] = True telegram = init_telegram(freqtradebot) telegram._send_msg('test') used_keyboard = bot.send_message.call_args[1]['reply_markup'] assert used_keyboard == default_keyboard # invalid keyboard in config -> default keyboard freqtradebot.config['telegram']['enabled'] = True freqtradebot.config['telegram']['keyboard'] = invalid_keys_list err_msg = re.escape("config.telegram.keyboard: 
Invalid commands for custom " "Telegram keyboard: ['/not_valid', '/alsoinvalid']" "\nvalid commands are: ") + r"*" with pytest.raises(OperationalException, match=err_msg): telegram = init_telegram(freqtradebot) # valid keyboard in config -> custom keyboard freqtradebot.config['telegram']['enabled'] = True freqtradebot.config['telegram']['keyboard'] = custom_keys_list telegram = init_telegram(freqtradebot) telegram._send_msg('test') used_keyboard = bot.send_message.call_args[1]['reply_markup'] assert used_keyboard == custom_keyboard assert log_has("using custom keyboard from config.json: " "[['/daily', '/stats', '/balance', '/profit', '/profit 5'], ['/count', " "'/start', '/reload_config', '/help']]", caplog)
36.766667
99
0.664373
7,983
61,768
4.897407
0.07278
0.047805
0.050926
0.047959
0.795248
0.769158
0.741892
0.70754
0.68053
0.654645
0
0.033708
0.204167
61,768
1,679
100
36.788565
0.761519
0.038046
0
0.611505
0
0.005516
0.192478
0.04166
0
0
0.000169
0
0.197794
1
0.052009
false
0.000788
0.017336
0
0.07171
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
1d0c70f6ccc2504429b90abc62f72486e916ce95
44
py
Python
__init__.py
halibot-extra/github
7f70aaaad964178436c9c1016fe08b0b03489e92
[ "BSD-3-Clause" ]
null
null
null
__init__.py
halibot-extra/github
7f70aaaad964178436c9c1016fe08b0b03489e92
[ "BSD-3-Clause" ]
4
2015-10-30T17:38:44.000Z
2017-10-04T00:19:12.000Z
__init__.py
halibot-extra/gitlab
120f9f897596f8fb784388bf0561be186ab38600
[ "BSD-3-Clause" ]
null
null
null
from .github import Github Default = Github
14.666667
26
0.795455
6
44
5.833333
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.159091
44
2
27
22
0.945946
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
df19d3aef5ae29829f24b7ae4d89a0a73be14c8f
265
py
Python
tests/setup_env.py
g0e/fsglue
736925ea8ae10a28698b41059427365a17967f83
[ "MIT" ]
1
2020-11-04T15:09:20.000Z
2020-11-04T15:09:20.000Z
tests/setup_env.py
g0e/fsglue
736925ea8ae10a28698b41059427365a17967f83
[ "MIT" ]
null
null
null
tests/setup_env.py
g0e/fsglue
736925ea8ae10a28698b41059427365a17967f83
[ "MIT" ]
null
null
null
import fsglue from unittest import mock import google.auth.credentials import os os.environ["FIRESTORE_EMULATOR_HOST"] = "localhost:8001" credentials = mock.Mock(spec=google.auth.credentials.Credentials) fsglue.initialize(project="test", credentials=credentials)
26.5
65
0.822642
33
265
6.545455
0.575758
0.092593
0.194444
0
0
0
0
0
0
0
0
0.016327
0.075472
265
9
66
29.444444
0.865306
0
0
0
0
0
0.154717
0.086792
0
0
0
0
0
1
0
false
0
0.571429
0
0.571429
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
df4e7bb657223cfc10fb642455c425282a2195ca
221
py
Python
tx_tecreports/management/commands/retrieve_report.py
texas/tx_tecreports
1e2311c8355117b45d7b23a4db41512c0cb6ce80
[ "Apache-2.0" ]
1
2021-02-19T20:16:19.000Z
2021-02-19T20:16:19.000Z
tx_tecreports/management/commands/retrieve_report.py
texas/tx_tecreports
1e2311c8355117b45d7b23a4db41512c0cb6ce80
[ "Apache-2.0" ]
null
null
null
tx_tecreports/management/commands/retrieve_report.py
texas/tx_tecreports
1e2311c8355117b45d7b23a4db41512c0cb6ce80
[ "Apache-2.0" ]
null
null
null
from django.core.management.base import BaseCommand from ...fetcher import get_report class Command(BaseCommand): def handle(self, report_id, **kwargs): report = get_report(report_id) report.save()
22.1
51
0.714932
28
221
5.5
0.642857
0.116883
0
0
0
0
0
0
0
0
0
0
0.18552
221
9
52
24.555556
0.855556
0
0
0
0
0
0
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
df708d501c2fdce149fd81527b5d01185bcfa0e8
2,477
py
Python
explorations/migrations/0002_auto_20210216_1626.py
aphp/Cohort360-Back-end
03184db6c4cb639955e2f3726c7e1b5cc7809f01
[ "Apache-2.0" ]
9
2020-11-04T13:08:47.000Z
2022-02-03T17:04:05.000Z
explorations/migrations/0002_auto_20210216_1626.py
aphp/Cohort360-Back-end
03184db6c4cb639955e2f3726c7e1b5cc7809f01
[ "Apache-2.0" ]
7
2021-03-17T17:48:26.000Z
2022-02-10T13:27:43.000Z
explorations/migrations/0002_auto_20210216_1626.py
aphp/Cohort360-Back-end
03184db6c4cb639955e2f3726c7e1b5cc7809f01
[ "Apache-2.0" ]
2
2020-11-23T10:42:40.000Z
2022-02-03T17:04:09.000Z
# Generated by Django 2.2.9 on 2021-02-16 16:26 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('explorations', '0001_initial'), ] operations = [ migrations.AddField( model_name='cohortresult', name='create_task_id', field=models.TextField(blank=True), ), migrations.AddField( model_name='cohortresult', name='request_job_duration', field=models.TextField(blank=True), ), migrations.AddField( model_name='cohortresult', name='request_job_fail_msg', field=models.TextField(blank=True), ), migrations.AddField( model_name='datedmeasure', name='count_task_id', field=models.TextField(blank=True), ), migrations.AddField( model_name='datedmeasure', name='request_job_duration', field=models.TextField(blank=True), ), migrations.AddField( model_name='datedmeasure', name='request_job_fail_msg', field=models.TextField(blank=True), ), migrations.AddField( model_name='datedmeasure', name='request_job_id', field=models.TextField(blank=True), ), migrations.AddField( model_name='datedmeasure', name='request_job_status', field=models.CharField(choices=[('pending', 'pending'), ('started', 'started'), ('cancelled', 'cancelled'), ('failed', 'failed'), ('finished', 'finished')], default='pending', max_length=10), ), migrations.AlterField( model_name='cohortresult', name='request_job_status', field=models.CharField(choices=[('pending', 'pending'), ('started', 'started'), ('cancelled', 'cancelled'), ('failed', 'failed'), ('finished', 'finished')], default='pending', max_length=10), ), migrations.AlterField( model_name='datedmeasure', name='fhir_datetime', field=models.DateTimeField(null=True), ), migrations.AlterField( model_name='datedmeasure', name='measure', field=models.BigIntegerField(null=True), ), migrations.AlterUniqueTogether( name='cohortresult', unique_together=set(), ), ]
33.931507
203
0.5652
215
2,477
6.344186
0.293023
0.072581
0.134897
0.158358
0.777126
0.777126
0.692082
0.692082
0.692082
0.692082
0
0.013295
0.301574
2,477
72
204
34.402778
0.775145
0.018167
0
0.742424
1
0
0.208642
0
0
0
0
0
0
1
0
false
0
0.015152
0
0.060606
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
df880a4f9f4dc7e2486bb8f946f61a7832ff7e7c
192
py
Python
Chapter01/draft_1.py
MrandrewGR/The-Python-Workshop
6d751c7bc19324a6bfe80309683421911bacf47f
[ "MIT" ]
null
null
null
Chapter01/draft_1.py
MrandrewGR/The-Python-Workshop
6d751c7bc19324a6bfe80309683421911bacf47f
[ "MIT" ]
null
null
null
Chapter01/draft_1.py
MrandrewGR/The-Python-Workshop
6d751c7bc19324a6bfe80309683421911bacf47f
[ "MIT" ]
null
null
null
for num in range(10,100): if num % 2 == 0: continue if num % 3 == 0: continue if num % 5 == 0: continue if num % 7 == 0: continue print(num)
19.2
25
0.447917
28
192
3.071429
0.5
0.232558
0.383721
0.488372
0
0
0
0
0
0
0
0.12037
0.4375
192
10
26
19.2
0.675926
0
0
0.4
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.1
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
10c63fbf9e6cc1b8f6d0de996a59f894ccaaacd9
99
py
Python
nepp_django/recogination/apps.py
soeasy10/nepp
e5f29a6839353b71f9f04d5bad88cb610426f372
[ "MIT" ]
3
2019-06-27T02:52:40.000Z
2019-06-28T17:55:46.000Z
nepp_django/recogination/apps.py
soeasy10/nepp
e5f29a6839353b71f9f04d5bad88cb610426f372
[ "MIT" ]
null
null
null
nepp_django/recogination/apps.py
soeasy10/nepp
e5f29a6839353b71f9f04d5bad88cb610426f372
[ "MIT" ]
null
null
null
from django.apps import AppConfig


class RecoginationConfig(AppConfig):
    """Django application configuration for the ``recogination`` app."""

    # Dotted-path label Django uses to register this application.
    name = 'recogination'
16.5
36
0.777778
10
99
7.7
0.9
0
0
0
0
0
0
0
0
0
0
0
0.151515
99
5
37
19.8
0.916667
0
0
0
0
0
0.121212
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
10cf9c97a825330f5d5afaaded36015d2187ba04
127
py
Python
tartiflette/resolver/__init__.py
alexchamberlain/tartiflette
6904b0f47770c348553e907be5f5bdb0929fe149
[ "MIT" ]
null
null
null
tartiflette/resolver/__init__.py
alexchamberlain/tartiflette
6904b0f47770c348553e907be5f5bdb0929fe149
[ "MIT" ]
1
2020-08-11T15:41:41.000Z
2020-08-11T15:41:41.000Z
tartiflette/resolver/__init__.py
alexchamberlain/tartiflette
6904b0f47770c348553e907be5f5bdb0929fe149
[ "MIT" ]
null
null
null
"""Public entry points of the resolver package."""

from .factory import ResolverExecutorFactory
from .resolver import Resolver

# Names re-exported on ``from ... import *``.
__all__ = [
    "Resolver",
    "ResolverExecutorFactory",
]
25.4
49
0.818898
11
127
9.090909
0.545455
0
0
0
0
0
0
0
0
0
0
0
0.102362
127
4
50
31.75
0.877193
0
0
0
0
0
0.244094
0.181102
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
10de3bda7d12637dcd3a98110fe9c872ef660757
1,693
py
Python
tests/test_trace_frame.py
NotPeopling2day/evm-trace
f344d29af17bf565e0b740faf62933ba0d5bff66
[ "Apache-2.0" ]
null
null
null
tests/test_trace_frame.py
NotPeopling2day/evm-trace
f344d29af17bf565e0b740faf62933ba0d5bff66
[ "Apache-2.0" ]
null
null
null
tests/test_trace_frame.py
NotPeopling2day/evm-trace
f344d29af17bf565e0b740faf62933ba0d5bff66
[ "Apache-2.0" ]
1
2022-03-17T22:59:06.000Z
2022-03-17T22:59:06.000Z
"""Validation tests for ``evm_trace.base.TraceFrame``."""
from copy import deepcopy

import pytest
from pydantic import ValidationError

from evm_trace.base import TraceFrame

# Baseline: a structurally valid trace frame (stack/memory as 32-byte hex
# words, storage as a hex-word mapping).
TRACE_FRAME_STRUCTURE = {
    "pc": 1564,
    "op": "RETURN",
    "gas": 0,
    "gasCost": 0,
    "depth": 1,
    "stack": [
        "0000000000000000000000000000000000000000000000000000000040c10f19",
        "0000000000000000000000000000000000000000000000000000000000000020",
        "0000000000000000000000000000000000000000000000000000000000000140",
    ],
    "memory": [
        "0000000000000000000000001e59ce931b4cfea3fe4b875411e280e173cb7a9c",
        "0000000000000000000000000000000000000000000000000000000000000001",
    ],
    "storage": {
        "0000000000000000000000000000000000000000000000000000000000000004": "0000000000000000000000001e59ce931b4cfea3fe4b875411e280e173cb7a9c",  # noqa: E501
        "ad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5": "0000000000000000000000001e59ce931b4cfea3fe4b875411e280e173cb7a9c",  # noqa: E501
        "aadb61a4b4c5d48b7a5669391b7c73852a3ab7795f24721b9a439220b54b591b": "0000000000000000000000000000000000000000000000000000000000000001",  # noqa: E501
    },
}


def test_trace_frame_validation_passes():
    # The baseline structure must construct a truthy frame without raising.
    frame = TraceFrame(**TRACE_FRAME_STRUCTURE)
    assert frame


# Each case overwrites one well-formed field with malformed data.
trace_frame_test_cases = (
    {"stack": ["potato"]},
    {"memory": ["potato"]},
    {"storage": {"piggy": "dippin"}},
)


@pytest.mark.parametrize("test_value", trace_frame_test_cases)
def test_trace_frame_validation_fails(test_value):
    # Mutate a deep copy so the shared baseline stays pristine.
    mutated = deepcopy(TRACE_FRAME_STRUCTURE)
    mutated.update(test_value)
    with pytest.raises(ValidationError):
        TraceFrame(**mutated)
33.86
157
0.764324
111
1,693
11.387387
0.459459
0.079114
0.09019
0.068829
0.042722
0
0
0
0
0
0
0.432918
0.145895
1,693
49
158
34.55102
0.441217
0.018901
0
0.051282
0
0
0.481593
0.424864
0
0
0
0
0.025641
1
0.051282
false
0.025641
0.102564
0
0.153846
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
8011c33ead3d763ffadd061bd6110bcce15fabdd
484
py
Python
src/metarl/np/algos/__init__.py
neurips2020submission11699/metarl
ae4825d21478fa1fd0aa6b116941ea40caa152a5
[ "MIT" ]
2
2021-02-07T12:14:52.000Z
2021-07-29T08:07:22.000Z
src/metarl/np/algos/__init__.py
neurips2020submission11699/metarl
ae4825d21478fa1fd0aa6b116941ea40caa152a5
[ "MIT" ]
null
null
null
src/metarl/np/algos/__init__.py
neurips2020submission11699/metarl
ae4825d21478fa1fd0aa6b116941ea40caa152a5
[ "MIT" ]
null
null
null
"""Reinforcement learning algorithms which use NumPy as a numerical backend."""
from metarl.np.algos.cem import CEM
from metarl.np.algos.cma_es import CMAES
from metarl.np.algos.meta_rl_algorithm import MetaRLAlgorithm
from metarl.np.algos.nop import NOP
from metarl.np.algos.off_policy_rl_algorithm import OffPolicyRLAlgorithm
from metarl.np.algos.rl_algorithm import RLAlgorithm

# Public API of this subpackage (order preserved from the original).
__all__ = [
    'RLAlgorithm',
    'CEM',
    'CMAES',
    'MetaRLAlgorithm',
    'NOP',
    'OffPolicyRLAlgorithm',
]
37.230769
79
0.797521
66
484
5.681818
0.439394
0.16
0.192
0.272
0
0
0
0
0
0
0
0
0.109504
484
12
80
40.333333
0.87007
0.150826
0
0
0
0
0.140741
0
0
0
0
0
0
1
0
false
0
0.6
0
0.6
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
3379713ed7ba5ec45c080155ba6ceca8dc062164
102
py
Python
raiden/tests/utils/transport.py
dannyk03/raiden
ebc1b598ef24d4e9c4d2cd26a79921338928e75e
[ "MIT" ]
1
2018-10-27T11:30:06.000Z
2018-10-27T11:30:06.000Z
raiden/tests/utils/transport.py
dannyk03/raiden
ebc1b598ef24d4e9c4d2cd26a79921338928e75e
[ "MIT" ]
null
null
null
raiden/tests/utils/transport.py
dannyk03/raiden
ebc1b598ef24d4e9c4d2cd26a79921338928e75e
[ "MIT" ]
null
null
null
class MockDiscovery(object):
    """Test double for a discovery service.

    Every lookup resolves to the same fixed local endpoint, so tests
    never touch real discovery infrastructure.
    """

    def get(self, node_address: bytes):
        # The address is intentionally ignored: all nodes map to localhost.
        return '127.0.0.1:5252'
20.4
39
0.656863
15
102
4.4
0.933333
0
0
0
0
0
0
0
0
0
0
0.123457
0.205882
102
4
40
25.5
0.691358
0
0
0
0
0
0.137255
0
0
0
0
0
0
1
0.333333
false
0
0
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
4
33a0575e276fdc8c7503b0b5bfce0dbd7256454d
38
py
Python
venv/lib/python3.6/encodings/gb2312.py
JamesMusyoka/Blog
fdcb51cf4541bbb3b9b3e7a1c3735a0b1f45f0b5
[ "Unlicense" ]
2
2019-04-17T13:35:50.000Z
2021-12-21T00:11:36.000Z
venv/lib/python3.6/encodings/gb2312.py
JamesMusyoka/Blog
fdcb51cf4541bbb3b9b3e7a1c3735a0b1f45f0b5
[ "Unlicense" ]
2
2021-03-31T19:51:24.000Z
2021-06-10T23:05:09.000Z
venv/lib/python3.6/encodings/gb2312.py
JamesMusyoka/Blog
fdcb51cf4541bbb3b9b3e7a1c3735a0b1f45f0b5
[ "Unlicense" ]
2
2019-10-01T08:47:35.000Z
2020-07-11T06:32:16.000Z
/usr/lib/python3.6/encodings/gb2312.py
38
38
0.815789
7
38
4.428571
1
0
0
0
0
0
0
0
0
0
0
0.157895
0
38
1
38
38
0.657895
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
d512e8df2a7310190c6c2300cadb679d830ac591
57
py
Python
multiplication/mul.py
PranauvS/PPP
8f95fb93c25758a0cda18c642af24da15951165f
[ "MIT" ]
null
null
null
multiplication/mul.py
PranauvS/PPP
8f95fb93c25758a0cda18c642af24da15951165f
[ "MIT" ]
null
null
null
multiplication/mul.py
PranauvS/PPP
8f95fb93c25758a0cda18c642af24da15951165f
[ "MIT" ]
null
null
null
def do_mul(a=2, b=4):
    """Print the product of ``a`` and ``b``.

    The original hard-coded 2 * 4; the defaults preserve that behavior
    while letting callers supply their own operands.
    """
    # Fix: the original ``def do_mul:`` was a SyntaxError (missing parens).
    print(a * b)


# Module-level invocation kept from the original script.
do_mul()
8.142857
14
0.508772
13
57
2.076923
0.692308
0.37037
0
0
0
0
0
0
0
0
0
0.052632
0.333333
57
6
15
9.5
0.657895
0.052632
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0.2
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
1d7904a25f8525f6ff94dc755535647c960d5477
239
py
Python
hydra/tonemap/__init__.py
JimBoonie/hydra
63665090812e4e209c67d5dc0b84b5bb35a57ead
[ "MIT" ]
28
2015-12-30T22:38:16.000Z
2021-03-21T07:52:39.000Z
hydra/tonemap/__init__.py
JimBoonie/hydra
63665090812e4e209c67d5dc0b84b5bb35a57ead
[ "MIT" ]
2
2017-02-23T09:54:09.000Z
2018-12-14T12:20:28.000Z
hydra/tonemap/__init__.py
JimBoonie/hydra
63665090812e4e209c67d5dc0b84b5bb35a57ead
[ "MIT" ]
7
2017-02-23T09:43:24.000Z
2022-01-10T12:17:36.000Z
"""Flatten every tonemapping-operator submodule into one namespace."""

# NOTE: the star-imports and their order are deliberate — on a name
# collision, later modules win. Do not reorder or narrow them.
from .normalize import *
from .logarithmic import *
from .exponential import *
from .gamma import *
from .tumblin import *
from .reinhard import *
from .durand import *
from .drago import *
from .fattal import *
from .lischinski import *
19.916667
26
0.74477
30
239
5.933333
0.4
0.505618
0
0
0
0
0
0
0
0
0
0
0.171548
239
11
27
21.727273
0.89899
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4