hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
287632adc920cdc6ad27d4bdb4cbfb0e7f0185b2
| 75
|
py
|
Python
|
problem_solving/algorithms/warmup/q8_mini_max_sum.py
|
mxdzi/hackerrank
|
4455f73e4479a4204b2e1167253f6a02351aa5b7
|
[
"MIT"
] | null | null | null |
problem_solving/algorithms/warmup/q8_mini_max_sum.py
|
mxdzi/hackerrank
|
4455f73e4479a4204b2e1167253f6a02351aa5b7
|
[
"MIT"
] | null | null | null |
problem_solving/algorithms/warmup/q8_mini_max_sum.py
|
mxdzi/hackerrank
|
4455f73e4479a4204b2e1167253f6a02351aa5b7
|
[
"MIT"
] | null | null | null |
def miniMaxSum(arr):
arr.sort()
print(sum(arr[:-1]), sum(arr[1:]))
| 18.75
| 38
| 0.56
| 12
| 75
| 3.5
| 0.583333
| 0.285714
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 0.173333
| 75
| 3
| 39
| 25
| 0.645161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
28805e886490e33dea415b6f47ab761b2f9c5314
| 265
|
py
|
Python
|
torchreid/engine/__init__.py
|
Arindam-1991/deep_reid
|
ab68d95c2229ef5b832a6a6b614a9b91e4984bd5
|
[
"MIT"
] | 1
|
2021-03-27T17:27:47.000Z
|
2021-03-27T17:27:47.000Z
|
torchreid/engine/__init__.py
|
Arindam-1991/deep_reid
|
ab68d95c2229ef5b832a6a6b614a9b91e4984bd5
|
[
"MIT"
] | null | null | null |
torchreid/engine/__init__.py
|
Arindam-1991/deep_reid
|
ab68d95c2229ef5b832a6a6b614a9b91e4984bd5
|
[
"MIT"
] | null | null | null |
from __future__ import print_function, absolute_import
from .image import ImageSoftmaxEngine, ImageTripletEngine
from .image import ImageTripletEngine_DG, ImageQAConvEngine
from .video import VideoSoftmaxEngine, VideoTripletEngine
from .engine import Engine
| 37.857143
| 60
| 0.85283
| 27
| 265
| 8.111111
| 0.555556
| 0.082192
| 0.136986
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116981
| 265
| 6
| 61
| 44.166667
| 0.935897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.2
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
28a23cbe9ccbb707d734088f1f5db0a7fffc7540
| 156
|
py
|
Python
|
osf/files/addons-wiki-local.py
|
sifulan-access-federation/helm-charts
|
ea6a36cedc0e5743d6b04440816c9dd8071a23e2
|
[
"Apache-2.0"
] | null | null | null |
osf/files/addons-wiki-local.py
|
sifulan-access-federation/helm-charts
|
ea6a36cedc0e5743d6b04440816c9dd8071a23e2
|
[
"Apache-2.0"
] | null | null | null |
osf/files/addons-wiki-local.py
|
sifulan-access-federation/helm-charts
|
ea6a36cedc0e5743d6b04440816c9dd8071a23e2
|
[
"Apache-2.0"
] | null | null | null |
import os
SHAREJS_HOST = os.environ['SHAREJS_HOST']
#SHAREJS_URL = '{}:{}'.format(SHAREJS_HOST, SHAREJS_PORT)
SHAREJS_URL = os.environ['OSF_SHAREJS_URL']
| 22.285714
| 57
| 0.75
| 22
| 156
| 4.954545
| 0.409091
| 0.302752
| 0.330275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089744
| 156
| 6
| 58
| 26
| 0.767606
| 0.358974
| 0
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
95552392964fafda5a556d7da53e06f994aaf50c
| 269
|
py
|
Python
|
setup.py
|
andrewp-as-is/django-admin-commands.py
|
88b5d22ca45104a1fc095c9b2855f5355c9eb8ea
|
[
"Unlicense"
] | 1
|
2021-09-23T18:16:56.000Z
|
2021-09-23T18:16:56.000Z
|
setup.py
|
andrewp-as-is/django-command-admin.py
|
88b5d22ca45104a1fc095c9b2855f5355c9eb8ea
|
[
"Unlicense"
] | null | null | null |
setup.py
|
andrewp-as-is/django-command-admin.py
|
88b5d22ca45104a1fc095c9b2855f5355c9eb8ea
|
[
"Unlicense"
] | null | null | null |
from setuptools import setup
setup(
name='django-command-admin',
version='2021.6.21',
packages=[
'django_command_admin',
'django_command_admin.admin',
'django_command_admin.migrations',
'django_command_admin.models'
]
)
| 20.692308
| 42
| 0.650558
| 29
| 269
| 5.758621
| 0.517241
| 0.389222
| 0.538922
| 0.275449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033981
| 0.234201
| 269
| 12
| 43
| 22.416667
| 0.776699
| 0
| 0
| 0
| 0
| 0
| 0.494424
| 0.312268
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.090909
| 0
| 0.090909
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
95875a6a4c3f6e728a147bbaf1de6024a9db8cee
| 48
|
py
|
Python
|
flickipedia/web/model.py
|
rfaulkner/Flickipedia
|
1b53f30be4027901748a09c411d568c7148f4e4b
|
[
"BSD-2-Clause"
] | 1
|
2016-03-11T09:40:19.000Z
|
2016-03-11T09:40:19.000Z
|
flickipedia/web/model.py
|
rfaulkner/Flickipedia
|
1b53f30be4027901748a09c411d568c7148f4e4b
|
[
"BSD-2-Clause"
] | 1
|
2015-02-27T02:23:19.000Z
|
2015-02-27T02:23:19.000Z
|
flickipedia/web/model.py
|
rfaulkner/Flickipedia
|
1b53f30be4027901748a09c411d568c7148f4e4b
|
[
"BSD-2-Clause"
] | null | null | null |
""" Defines the data model for the backend. """
| 24
| 47
| 0.666667
| 7
| 48
| 4.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 48
| 1
| 48
| 48
| 0.820513
| 0.8125
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
95a65ac8b5490f889dfb6d4899690b82aadeff5f
| 137
|
py
|
Python
|
lib/djWasabi/__init__.py
|
dj-wasabi/dj-wasabi-release
|
8d4742b8b5e9dd4c183c5e07d14086781d848488
|
[
"MIT"
] | null | null | null |
lib/djWasabi/__init__.py
|
dj-wasabi/dj-wasabi-release
|
8d4742b8b5e9dd4c183c5e07d14086781d848488
|
[
"MIT"
] | 10
|
2021-01-07T20:22:18.000Z
|
2021-03-16T19:47:39.000Z
|
lib/djWasabi/__init__.py
|
dj-wasabi/dj-wasabi-release
|
8d4742b8b5e9dd4c183c5e07d14086781d848488
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import djWasabi.config
import djWasabi.container
import djWasabi.git
import djWasabi.generic
import djWasabi.http
| 17.125
| 25
| 0.832117
| 19
| 137
| 6
| 0.578947
| 0.614035
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094891
| 137
| 7
| 26
| 19.571429
| 0.919355
| 0.145985
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
95bed3135c5beff671116fc8f5015ee52c9bdad5
| 241
|
py
|
Python
|
Scripts/Operadores Aritmeticos/OP D7.py
|
Vinicius-de-Souza-Reis-Lima/Python
|
2009e9f5be10a1cf4e506a7f9f17c6b90a30c7c7
|
[
"MIT"
] | null | null | null |
Scripts/Operadores Aritmeticos/OP D7.py
|
Vinicius-de-Souza-Reis-Lima/Python
|
2009e9f5be10a1cf4e506a7f9f17c6b90a30c7c7
|
[
"MIT"
] | null | null | null |
Scripts/Operadores Aritmeticos/OP D7.py
|
Vinicius-de-Souza-Reis-Lima/Python
|
2009e9f5be10a1cf4e506a7f9f17c6b90a30c7c7
|
[
"MIT"
] | null | null | null |
print('Pintura de parede')
print('='*25)
l = float(input('Qual a largura da parede? '))
a = float(input('Qual a altura da parede? '))
print('A área da parede é {:.3f} m² e é necessário {:.3f} L de tinta para pintá-lo.'.format(l*a, (l*a)/2))
| 40.166667
| 106
| 0.643154
| 46
| 241
| 3.369565
| 0.543478
| 0.154839
| 0.180645
| 0.193548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029412
| 0.153527
| 241
| 5
| 107
| 48.2
| 0.730392
| 0
| 0
| 0
| 0
| 0.2
| 0.60166
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.6
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
95f70830a1a6789f672b7174c8a8313a69cdd1ae
| 124
|
py
|
Python
|
src/HABApp/core/files/manager/__init__.py
|
DerOetzi/HABApp
|
a123fbfa9928ebb3cda9a84f6984dcba593c8236
|
[
"Apache-2.0"
] | 44
|
2018-12-13T08:46:44.000Z
|
2022-03-07T03:23:21.000Z
|
src/HABApp/core/files/manager/__init__.py
|
DerOetzi/HABApp
|
a123fbfa9928ebb3cda9a84f6984dcba593c8236
|
[
"Apache-2.0"
] | 156
|
2019-03-02T20:53:31.000Z
|
2022-03-23T13:13:58.000Z
|
src/HABApp/core/files/manager/__init__.py
|
DerOetzi/HABApp
|
a123fbfa9928ebb3cda9a84f6984dcba593c8236
|
[
"Apache-2.0"
] | 18
|
2019-03-08T07:13:21.000Z
|
2022-03-22T19:52:31.000Z
|
from .files import FILES, file_state_changed
from .listen_events import setup_file_manager
from .worker import process_file
| 31
| 45
| 0.862903
| 19
| 124
| 5.315789
| 0.631579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104839
| 124
| 3
| 46
| 41.333333
| 0.90991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c27af80b65a0425130520ae4f1a2213ce1c7418c
| 1,250
|
py
|
Python
|
docs/core_stubs.py
|
dgehringer/sqsgenerator
|
562697166a53f806629e8e1086b381871d9a675e
|
[
"MIT"
] | 14
|
2019-11-16T10:34:04.000Z
|
2022-03-28T09:32:42.000Z
|
docs/core_stubs.py
|
dgehringer/sqsgenerator
|
562697166a53f806629e8e1086b381871d9a675e
|
[
"MIT"
] | 5
|
2019-11-21T05:54:07.000Z
|
2022-03-29T07:56:34.000Z
|
docs/core_stubs.py
|
dgehringer/sqsgenerator
|
562697166a53f806629e8e1086b381871d9a675e
|
[
"MIT"
] | 4
|
2020-09-28T14:28:23.000Z
|
2021-03-05T14:11:44.000Z
|
"""
Because on the ReadTheDocs Server we cannot import the core extension, because a libpython*so which the core.so
is linked against is not found, this module fakes the core.so such that imports works
"""
__version__ = (1, 2, 3, 4)
__features__ = {}
ALL_SITES = -1
class SQSResult:
pass
class IterationSettings:
pass
class BoostLogLevel:
trace = 1
debug = 2
info = 3
warning = 4
error = 5
class IterationMode:
random = 'random'
systematic = 'systematic'
class Structure:
pass
class Atom:
def __init__(self, z, symbol):
self.Z = z
self.symbol = symbol
def set_log_level(*args, **kwargs):
pass
def pair_sqs_iteration(*args, **kwargs):
pass
def pair_analysis(*args, **kwargs):
pass
def default_shell_distances(*args, **kwargs):
pass
def total_permutations(*args, **kwargs):
pass
def rank_structure(*args, **kwargs):
pass
def atoms_from_numbers(*args, **kwargs):
pass
def atoms_from_symbols(*args, **kwargs):
pass
def available_species(*args, **kwargs):
return [Atom(1, 'H')]
def symbols_from_z(*args, **kwargs):
pass
def z_from_symbols(*args, **kwargs):
pass
def build_configuration(*args, **kwargs):
pass
| 14.044944
| 111
| 0.6584
| 166
| 1,250
| 4.76506
| 0.481928
| 0.151707
| 0.19469
| 0.214918
| 0.184576
| 0.131479
| 0
| 0
| 0
| 0
| 0
| 0.011435
| 0.2304
| 1,250
| 89
| 112
| 14.044944
| 0.810811
| 0.1576
| 0
| 0.304348
| 0
| 0
| 0.016252
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.282609
| false
| 0.304348
| 0
| 0.021739
| 0.586957
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
c2e52ada4a91ab8de68f54118be8f421cae3f519
| 190
|
py
|
Python
|
getting_started/ascii.py
|
AoEiuV020/LearningPython
|
aac0f3f99cfd3d03a96a3c0e41da8f82ea0b8c70
|
[
"MIT"
] | null | null | null |
getting_started/ascii.py
|
AoEiuV020/LearningPython
|
aac0f3f99cfd3d03a96a3c0e41da8f82ea0b8c70
|
[
"MIT"
] | null | null | null |
getting_started/ascii.py
|
AoEiuV020/LearningPython
|
aac0f3f99cfd3d03a96a3c0e41da8f82ea0b8c70
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
assert ascii('中文') == "'\\u4e2d\\u6587'"
assert ascii([]) == '[]'
assert ascii('\x33') == "'3'"
# 自带repr的效果,
assert ascii('asdf') == "'asdf'"
| 21.111111
| 40
| 0.542105
| 23
| 190
| 4.478261
| 0.695652
| 0.427184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067073
| 0.136842
| 190
| 8
| 41
| 23.75
| 0.560976
| 0.284211
| 0
| 0
| 0
| 0
| 0.278195
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6c03e18a6ee03c241c721e936e1f70618b7bc138
| 203
|
py
|
Python
|
opponents/monika/helper-scripts/c14n.py
|
laytonc32/spnati
|
40cf1c51d193f48bbf0e3fdc3644eb2d0e922f7d
|
[
"MIT"
] | null | null | null |
opponents/monika/helper-scripts/c14n.py
|
laytonc32/spnati
|
40cf1c51d193f48bbf0e3fdc3644eb2d0e922f7d
|
[
"MIT"
] | 3
|
2020-03-24T17:26:36.000Z
|
2021-02-02T22:10:30.000Z
|
opponents/monika/helper-scripts/c14n.py
|
laytonc32/spnati
|
40cf1c51d193f48bbf0e3fdc3644eb2d0e922f7d
|
[
"MIT"
] | null | null | null |
import lxml.etree as ET
import sys
import shutil
if __name__ == '__main__':
shutil.copy(sys.argv[1], sys.argv[1]+'.generated')
tree = ET.parse(sys.argv[1])
tree.write_c14n(sys.argv[2])
| 20.3
| 54
| 0.660099
| 33
| 203
| 3.787879
| 0.575758
| 0.224
| 0.192
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035928
| 0.17734
| 203
| 9
| 55
| 22.555556
| 0.712575
| 0
| 0
| 0
| 1
| 0
| 0.08867
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.428571
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6c1cca63fd909cb450882ff80f7f9ee14abb7a46
| 100
|
py
|
Python
|
__init__.py
|
KoalaaDev/Steam-market-python-api
|
082f7ac0f581e85fdac3e3c794db407e0b054f74
|
[
"MIT"
] | 1
|
2021-06-01T12:08:47.000Z
|
2021-06-01T12:08:47.000Z
|
__init__.py
|
KoalaaDev/Steam-market-python-api
|
082f7ac0f581e85fdac3e3c794db407e0b054f74
|
[
"MIT"
] | null | null | null |
__init__.py
|
KoalaaDev/Steam-market-python-api
|
082f7ac0f581e85fdac3e3c794db407e0b054f74
|
[
"MIT"
] | null | null | null |
"""
Python module to get item prices from Steam Marketplace quickly.
"""
from .steammarket import *
| 20
| 64
| 0.75
| 13
| 100
| 5.769231
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 100
| 4
| 65
| 25
| 0.892857
| 0.64
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6c59f5004022529ad44ed45d8253b8e6ae1d7987
| 7,564
|
py
|
Python
|
tests/test_data.py
|
altmirai/altpiggybank
|
751590642e0a2a572310923fbd971acd0fdf8527
|
[
"MIT"
] | null | null | null |
tests/test_data.py
|
altmirai/altpiggybank
|
751590642e0a2a572310923fbd971acd0fdf8527
|
[
"MIT"
] | 6
|
2020-08-13T13:45:12.000Z
|
2020-08-14T15:08:41.000Z
|
tests/test_data.py
|
altmirai/altpiggybank
|
751590642e0a2a572310923fbd971acd0fdf8527
|
[
"MIT"
] | 1
|
2020-08-13T16:16:00.000Z
|
2020-08-13T16:16:00.000Z
|
class MockFeeEstOne():
@property
def status_code(self):
return 200
def json(self):
return {"fastestFee": 200, "halfHourFee": 200, "hourFee": 100}
class TestDataOne():
@property
def path(self):
return 'tests/test_files'
@property
def pub_key_file_name(self):
return f"{self.path}/pubKey{self.vkhandle}.pem"
@property
def vkhandle(self):
return '7340043'
@property
def skhandle(self):
return '7340044'
@property
def address(self):
return '1DSRQWjbNXLN8ZZZ6gqcGx1WNZeKHEJXDv'
@property
def confirmed_balance(self):
return 5763656
@property
def all(self):
return True
@property
def fee(self):
return 104600
@property
def recipient(self):
return '1BHznNt5x9rqMQ1dpWy4fw5y5PSJV3ZR3L'
@property
def value(self):
return None
@property
def change_address(self):
return None
@property
def tx_inputs(self):
return [
{
'output_no': 0,
'outpoint_index': b'\x00\x00\x00\x00',
'outpoint_hash': bytearray(
b"Y:N~v%0\xbc\xcf\xbc\xd9@\xc2K\xc3\x92\xc6\xfb\xe7#\xcf\x8e\xf4\xe8\xa9t\xf5m\x1fE\'\x9d"
)
},
{
'output_no': 0,
'outpoint_index': b'\x00\x00\x00\x00',
'outpoint_hash': bytearray(
b'qg/\xe5\x86\xbcvS\xc7t\\D\r\xc4\x1dG8\xe9\xab3\xa4|N.x(\xa7v\xaf\x8d\xcfx'
)
},
{
'output_no': 0,
'outpoint_index': b'\x00\x00\x00\x00',
'outpoint_hash': bytearray(
b'"\x9a\xd5\x904\\^\xac^\xc1\xe6c>\x93mU\xfc\xf8\xab\x17\xf4G[\xae\xd9\x13\xb9\xc6\xe7\x05\x7f_'
)
}
]
@property
def mock_fees(self):
return {
'estimates': {
'Fastest': 97600,
'Half hour': 97600,
'One hour': 48800
},
'n_inputs': 3,
'n_outputs': 1
}
@property
def tosign_tx_hex(self):
return [
'0100000003593a4e7e762530bccfbcd940c24bc392c6fbe723cf8ef4e8a974f56d1f45279d000000001976a914887047f28478e316732db0bccd086482c8617e4a88acffffffff71672fe586bc7653c7745c440dc41d4738e9ab33a47c4e2e7828a776af8dcf780000000000ffffffff229ad590345c5eac5ec1e6633e936d55fcf8ab17f4475baed913b9c6e7057f5f0000000000ffffffff01b0595600000000001976a91470e825c3aa5396f6cfe794bcc6ad61ab9dfa6a4088ac0000000001000000',
'0100000003593a4e7e762530bccfbcd940c24bc392c6fbe723cf8ef4e8a974f56d1f45279d0000000000ffffffff71672fe586bc7653c7745c440dc41d4738e9ab33a47c4e2e7828a776af8dcf78000000001976a914887047f28478e316732db0bccd086482c8617e4a88acffffffff229ad590345c5eac5ec1e6633e936d55fcf8ab17f4475baed913b9c6e7057f5f0000000000ffffffff01b0595600000000001976a91470e825c3aa5396f6cfe794bcc6ad61ab9dfa6a4088ac0000000001000000',
'0100000003593a4e7e762530bccfbcd940c24bc392c6fbe723cf8ef4e8a974f56d1f45279d0000000000ffffffff71672fe586bc7653c7745c440dc41d4738e9ab33a47c4e2e7828a776af8dcf780000000000ffffffff229ad590345c5eac5ec1e6633e936d55fcf8ab17f4475baed913b9c6e7057f5f000000001976a914887047f28478e316732db0bccd086482c8617e4a88acffffffff01b0595600000000001976a91470e825c3aa5396f6cfe794bcc6ad61ab9dfa6a4088ac0000000001000000'
]
@property
def tosign_tx_hashed_hex(self):
return [
'af69b4567cbcd15f2c719a62311ef8fe47711e21c038dad27f3fc631baf21f3c', '600da7c38b14b9bfc44a6deba21621555b1aadba9066a1e76fe4a7e48082748f', 'f5aa21d884e6e5b4eac08803640b6546145b2f2bf34a04945ea7685615454f27'
]
@property
def tx_hex(self):
return '0100000003593a4e7e762530bccfbcd940c24bc392c6fbe723cf8ef4e8a974f56d1f45279d000000008a473044022038096755f89ba2cb28f4b4a7db056bbbe77972560d65da3a55e2ef702fd837f90220196ed119a36cb134000f90ff6048a7b1a838ee65c2369207145305155c3953fa0141046e9cd8479193a02d025d236545e72edf10237e54a64a887df866f8d5b86a0fd55449ad821df8e2568116e52cdee3a6b11d7ae7d5e1920244e2426704c5f58005ffffffff71672fe586bc7653c7745c440dc41d4738e9ab33a47c4e2e7828a776af8dcf78000000008a473044022022bc4f1e0075c943af3065072ee45231846fc3a7e2c9192766ded3a963e207880220415b88dcae7cf63eeb504c6c96ebb3b8049f00cc41dee7ed0309d29c06549e4b0141046e9cd8479193a02d025d236545e72edf10237e54a64a887df866f8d5b86a0fd55449ad821df8e2568116e52cdee3a6b11d7ae7d5e1920244e2426704c5f58005ffffffff229ad590345c5eac5ec1e6633e936d55fcf8ab17f4475baed913b9c6e7057f5f000000008b483045022100f659e8a85019ae4562665a2377caae4535b7674a2478567adcc44133c96cd4bc022045e7076b0e212159323b68cc4a29805e0d12f349d23e8ca8f6fac79a1d9afcc60141046e9cd8479193a02d025d236545e72edf10237e54a64a887df866f8d5b86a0fd55449ad821df8e2568116e52cdee3a6b11d7ae7d5e1920244e2426704c5f58005ffffffff01b0595600000000001976a91470e825c3aa5396f6cfe794bcc6ad61ab9dfa6a4088ac00000000'
@property
def signature_files(self):
signature_file_names = ['signedTx7340043_1.der',
'signedTx7340043_2.der', 'signedTx7340043_3.der']
signature_files = []
for signature_file_name in signature_file_names:
file = open(f'{self.path}/{signature_file_name}', 'rb')
signature_files.append(file)
return signature_files
@property
def aws(self):
return True
@property
def output_path(self):
return 'test_output'
@property
def pem(self):
return '-----BEGIN PUBLIC KEY-----\nMFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEbpzYR5GToC0CXSNlRecu3xAjflSmSoh9\n+Gb41bhqD9VUSa2CHfjiVoEW5Sze46axHXrn1eGSAkTiQmcExfWABQ==\n-----END PUBLIC KEY-----\n'
@property
def addr_json_file(self):
return {
'file_name': f'addr{self.vkhandle}',
'vkhandle': self.vkhandle,
'skhandle': self.skhandle,
'pem': self.pem
}
@property
def addr_csv_file(self):
return [self.vkhandle, self.skhandle, self.address, str(self.confirmed_balance)]
@property
def addr_json_file_name(self):
return f"{self.path}/addr{self.vkhandle}.json"
@property
def bitcoinfees_mock_api(self):
n_inputs = len(self.tx_inputs)
n_outputs = 1 if self.all else 2
bytes = 10 + (n_inputs * 148) + (n_outputs * 34)
resp = {"fastestFee": 100, "halfHourFee": 75, "hourFee": 50}
estimate = {'Fastest': resp['fastestFee'] * bytes,
'Half hour': resp['halfHourFee'] * bytes,
'One hour': resp['hourFee'] * bytes}
return estimate
@property
def signature_file_names(self):
i = 0
sig_file_names = []
while i < len(self.tx_inputs):
sig_file_names.append(f'{self.path}/signedTx{self.vkhandle}_{i+1}.der')
i += 1
return sig_file_names
@property
def tx_json_file_name(self):
return f'{self.path}/tx{self.vkhandle}.json'
@property
def tx_json_file(self):
return {
'file_name': f'tx{self.vkhandle}',
'all': self.all,
'fee': self.fee,
'recipient': self.recipient,
'partial': False if self.all else True,
'vkhandle': self.vkhandle,
'skhandle': self.skhandle,
'pem': self.pem,
'address': self.address,
'confrimed_balance': self.confirmed_balance,
'n_tx_inputs': len(self.tx_inputs)
}
| 39.810526
| 1,181
| 0.68945
| 540
| 7,564
| 9.507407
| 0.312963
| 0.059992
| 0.010518
| 0.010518
| 0.127581
| 0.080639
| 0.080639
| 0.064862
| 0.052785
| 0.033307
| 0
| 0.321159
| 0.228979
| 7,564
| 189
| 1,182
| 40.021164
| 0.559156
| 0
| 0
| 0.320755
| 0
| 0.018868
| 0.500397
| 0.428741
| 0
| 1
| 0
| 0
| 0
| 1
| 0.18239
| false
| 0
| 0
| 0.163522
| 0.377358
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
6c65ef9e664ba7a7bd44f2b395d1994a590fd6d6
| 510
|
py
|
Python
|
units/mass/ounces.py
|
putridparrot/PyUnits
|
4f1095c6fc0bee6ba936921c391913dbefd9307c
|
[
"MIT"
] | null | null | null |
units/mass/ounces.py
|
putridparrot/PyUnits
|
4f1095c6fc0bee6ba936921c391913dbefd9307c
|
[
"MIT"
] | null | null | null |
units/mass/ounces.py
|
putridparrot/PyUnits
|
4f1095c6fc0bee6ba936921c391913dbefd9307c
|
[
"MIT"
] | null | null | null |
# <auto-generated>
# This code was generated by the UnitCodeGenerator tool
#
# Changes to this file will be lost if the code is regenerated
# </auto-generated>
def to_milligrams(value):
return value * 28349.5231
def to_grams(value):
return value * 28.3495231
def to_kilograms(value):
return value / 35.274
def to_tonnes(value):
return value * 0.0000283495231
def to_pounds(value):
return value * 0.0625
def to_stones(value):
return value / 224.0
def to_carats(value):
return value / 0.00705479
| 23.181818
| 62
| 0.741176
| 80
| 510
| 4.6375
| 0.4875
| 0.09434
| 0.301887
| 0.137466
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129412
| 0.166667
| 510
| 21
| 63
| 24.285714
| 0.743529
| 0.292157
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
66605c85be8be0d5a19ffa3d08ad37d3e5d62a1c
| 47
|
py
|
Python
|
src/csharpyml/sphinxext/__init__.py
|
sdpython/csharpyml
|
f814af89c5b988924a7f31fe71ec6eb515292070
|
[
"MIT"
] | 4
|
2018-06-07T06:34:32.000Z
|
2020-02-12T17:39:58.000Z
|
src/csharpyml/sphinxext/__init__.py
|
sdpython/csharpyml
|
f814af89c5b988924a7f31fe71ec6eb515292070
|
[
"MIT"
] | 13
|
2018-05-21T23:06:58.000Z
|
2018-12-30T17:57:11.000Z
|
src/csharpyml/sphinxext/__init__.py
|
sdpython/csharpyml
|
f814af89c5b988924a7f31fe71ec6eb515292070
|
[
"MIT"
] | null | null | null |
"""
@file
@brielf Shortcut to *sphinxext*.
"""
| 9.4
| 32
| 0.617021
| 5
| 47
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 47
| 4
| 33
| 11.75
| 0.725
| 0.808511
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
66e881771ab64ac734ddfcd0593603a588eaaad3
| 126
|
py
|
Python
|
website/core/filters.py
|
tibet5/website
|
937e1941aaadbf7cd0a404a2655858451c01dd54
|
[
"MIT"
] | null | null | null |
website/core/filters.py
|
tibet5/website
|
937e1941aaadbf7cd0a404a2655858451c01dd54
|
[
"MIT"
] | null | null | null |
website/core/filters.py
|
tibet5/website
|
937e1941aaadbf7cd0a404a2655858451c01dd54
|
[
"MIT"
] | null | null | null |
import django_filters
from .forms import DateField
class DateFilter(django_filters.DateFilter):
field_class = DateField
| 18
| 44
| 0.81746
| 15
| 126
| 6.666667
| 0.6
| 0.26
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134921
| 126
| 6
| 45
| 21
| 0.917431
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
dd2586111563b469258f0f829bf2e71462afefc7
| 83
|
py
|
Python
|
microfilm/dataset/__init__.py
|
guiwitz/microfilm
|
582bb487ec14603b67d52b904e1ab90317fcfaba
|
[
"BSD-3-Clause"
] | 22
|
2021-04-09T19:07:35.000Z
|
2022-03-16T21:46:10.000Z
|
microfilm/dataset/__init__.py
|
habi/microfilm
|
971765734276d58d909ef41cfae5a45ab8081ef1
|
[
"BSD-3-Clause"
] | 9
|
2021-09-01T13:52:54.000Z
|
2022-03-30T11:53:50.000Z
|
microfilm/dataset/__init__.py
|
habi/microfilm
|
971765734276d58d909ef41cfae5a45ab8081ef1
|
[
"BSD-3-Clause"
] | 2
|
2021-07-06T12:49:53.000Z
|
2022-03-18T14:17:25.000Z
|
from .dataset import (Data, TIFFSeries, MultipageTIFF, H5, ND2, Nparray, findfiles)
| 83
| 83
| 0.783133
| 10
| 83
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 0.108434
| 83
| 1
| 83
| 83
| 0.851351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
dd5925e179f13b4c6b36c43192408cc3f6311afa
| 113
|
py
|
Python
|
Data_Conversion/Kfunc/Rel/__init__.py
|
simay1224/K-project-UI
|
c69f83b6446052a1cd32a00700e7db197f36a1ed
|
[
"Apache-2.0"
] | null | null | null |
Data_Conversion/Kfunc/Rel/__init__.py
|
simay1224/K-project-UI
|
c69f83b6446052a1cd32a00700e7db197f36a1ed
|
[
"Apache-2.0"
] | 1
|
2018-06-19T22:21:43.000Z
|
2018-06-19T22:21:43.000Z
|
Data_Conversion/Kfunc/Rel/__init__.py
|
simay1224/K-project-UI
|
c69f83b6446052a1cd32a00700e7db197f36a1ed
|
[
"Apache-2.0"
] | 3
|
2018-08-29T18:39:57.000Z
|
2020-06-05T15:29:07.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 15 17:11:32 2016
@author: medialab
"""
from reliability import *
| 14.125
| 35
| 0.637168
| 17
| 113
| 4.235294
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141304
| 0.185841
| 113
| 7
| 36
| 16.142857
| 0.641304
| 0.681416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
dd7120c591c7e36b9c8e88b641d0f11b530b81b6
| 168
|
py
|
Python
|
Clarinet/utils/convert/__init__.py
|
rohans0509/Clarinet
|
0a7a6a5e6a91f93956b6b5739cab1f030655cac8
|
[
"MIT"
] | 1
|
2022-01-28T20:30:07.000Z
|
2022-01-28T20:30:07.000Z
|
Clarinet/utils/convert/__init__.py
|
rohans0509/Clarinet
|
0a7a6a5e6a91f93956b6b5739cab1f030655cac8
|
[
"MIT"
] | null | null | null |
Clarinet/utils/convert/__init__.py
|
rohans0509/Clarinet
|
0a7a6a5e6a91f93956b6b5739cab1f030655cac8
|
[
"MIT"
] | 2
|
2021-11-23T13:55:10.000Z
|
2021-11-23T13:56:57.000Z
|
# from .audio2midi import audio2midi
from .midi2audio import midi2audio
from .midi2text import midi2text
from .remi2midi import remi2midi
from .midi2png import midi2png
| 33.6
| 36
| 0.845238
| 20
| 168
| 7.1
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067568
| 0.119048
| 168
| 5
| 37
| 33.6
| 0.891892
| 0.202381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
dd9dbffb2c35724257598fc62079dd10eb734945
| 327
|
py
|
Python
|
python/testData/inspections/PyArgumentListInspection/topLevelOverloadsAndImplementation.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/inspections/PyArgumentListInspection/topLevelOverloadsAndImplementation.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/PyArgumentListInspection/topLevelOverloadsAndImplementation.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from typing import overload
@overload
def foo(value: None) -> None:
pass
@overload
def foo(value: int) -> str:
pass
@overload
def foo(value: str) -> str:
pass
def foo(value):
return None
foo(<warning descr="Parameter(s) unfilledPossible callees:foo(value: None)foo(value: int)foo(value: str)">)</warning>
| 16.35
| 117
| 0.681957
| 47
| 327
| 4.744681
| 0.382979
| 0.251121
| 0.197309
| 0.255605
| 0.206278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17737
| 327
| 20
| 117
| 16.35
| 0.828996
| 0
| 0
| 0.461538
| 0
| 0.076923
| 0.256098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.230769
| 0.076923
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
06ec07fad8cdcf1d11714d3c3f7151141e03fe21
| 4,123
|
py
|
Python
|
sevapp/forms.py
|
sm8799/secure_e_vote
|
edc5d7abbcaf8d3377a3639adc2ca03c5472c1f6
|
[
"MIT"
] | 6
|
2020-03-27T11:48:01.000Z
|
2021-07-28T20:59:50.000Z
|
sevapp/forms.py
|
sm8799/secure_e_vote
|
edc5d7abbcaf8d3377a3639adc2ca03c5472c1f6
|
[
"MIT"
] | null | null | null |
sevapp/forms.py
|
sm8799/secure_e_vote
|
edc5d7abbcaf8d3377a3639adc2ca03c5472c1f6
|
[
"MIT"
] | 5
|
2020-11-14T15:01:47.000Z
|
2021-12-02T05:08:59.000Z
|
from django import forms
from sevapp.models import Admin, Entry, Election, Poff, Candidate, Voter
from django.core.validators import MaxValueValidator, MinValueValidator
class EntryForm(forms.ModelForm):
class Meta():
model = Entry
fields = ('Aadhaar_Number', 'name')
widgets = {
'Aadhaar_Number':forms.NumberInput(attrs = {'class':'form-control', 'placeholder':'987456321456', 'min':'100000000000'}),
'name':forms.TextInput(attrs={'class':'form-control', 'placeholder':'secureevote'})
}
def clean(self):
cleaned_data = super().clean()
aadhar = self.cleaned_data.get('Aadhaar_Number')
if len(str(aadhar)) != 12:
# Only do something if both fields are valid so far.
raise forms.ValidationError('invalid aadhar')
class AdminForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput(attrs={'class':'form-control'}))
class Meta():
model = Admin
fields = ('Email',)
widgets = {
'Email':forms.EmailInput(attrs = {'class':'form-control', 'placeholder':'example@gmail.com'}),
}
login_choices = (
("1","Admin"),
("2","Voter"),
("3","P.O"),
)
class LoginForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput(
attrs = {
'class':'form-control',
}
))
Aadhaar_Number = forms.IntegerField(validators = [MinValueValidator(100000000000), MaxValueValidator(999999999999)], widget=forms.NumberInput(
attrs = {
'class':'form-control', 'placeholder':'987456321456','min':'100000000000', 'max':'999999999999'
}
))
Email = forms.EmailField(widget=forms.EmailInput(
attrs = {
'class':'form-control', 'placeholder':'example@gmail.com',
}
))
class Meta():
model = Entry
fields = ('Aadhaar_Number', )
field_order = ['Aadhaar_Number', 'Email', 'password']
def clean(self):
cleaned_data = super().clean()
aadhar = self.cleaned_data.get('Aadhaar_Number')
if len(str(aadhar)) != 12:
# Only do something if both fields are valid so far.
raise forms.ValidationError('invalid aadhar')
class ElectionForm(forms.ModelForm):
class Meta():
model = Election
fields = ('election_name',)
widgets = {
'election_name':forms.TextInput(attrs = {'class':'form-control', 'placeholder':'Sunitya_Pizza_Council'}),
}
class PoffForm(forms.ModelForm):
class Meta():
model = Poff
fields = ('Email',)
widgets = {
'Email':forms.EmailInput(attrs = {'class':'form-control', 'placeholder':'example@gmail.com'}),
}
class CandidateForm(forms.ModelForm):
class Meta():
model = Candidate
fields = ('profile_pic', 'election')
name = forms.CharField(widget= forms.TextInput(
attrs = {
'class':'form-control'
}
))
Aadhaar_Number = forms.IntegerField(validators = [MinValueValidator(100000000000), MaxValueValidator(999999999999)], widget=forms.NumberInput(
attrs = {
'class':'form-control', 'min':'100000000000', 'max':'999999999999',
}
))
profile_pic = forms.ImageField(widget=forms.FileInput(
attrs={
'class':'form-control',
'id':'file',
}
))
def clean(self):
cleaned_data = super().clean()
aadhar = self.cleaned_data.get('Aadhaar_Number')
if len(str(aadhar)) != 12:
# Only do something if both fields are valid so far.
raise forms.ValidationError('invalid aadhar')
field_order = ['name', 'Aadhaar_Number', 'election', 'profile_pic']
class VoterForm(forms.ModelForm):
class Meta():
model = Voter
fields = ('Email', )
name = forms.CharField(widget= forms.TextInput(
attrs = {
'class':'form-control'
}
))
Aadhaar_Number = forms.IntegerField(validators = [MinValueValidator(100000000000), MaxValueValidator(999999999999)], widget=forms.NumberInput(
attrs = {
'class':'form-control', 'min':'100000000000', 'max':'999999999999',
}
))
Email = forms.EmailField(widget=forms.EmailInput(
attrs = {
'class':'form-control'
}
))
def clean(self):
cleaned_data = super().clean()
aadhar = self.cleaned_data.get('Aadhaar_Number')
if len(str(aadhar)) != 12:
# Only do something if both fields are valid so far.
raise forms.ValidationError('invalid aadhar')
field_order = ['name', 'Aadhaar_Number', 'Email']
| 29.661871
| 143
| 0.685666
| 460
| 4,123
| 6.076087
| 0.208696
| 0.053667
| 0.075134
| 0.112701
| 0.800716
| 0.755635
| 0.755635
| 0.730233
| 0.694454
| 0.694454
| 0
| 0.0544
| 0.148436
| 4,123
| 139
| 144
| 29.661871
| 0.741669
| 0.049236
| 0
| 0.588235
| 0
| 0
| 0.225996
| 0.005363
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033613
| false
| 0.02521
| 0.02521
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
661f1fdd5f1653e325d7fc0d179849f58bfe3fd1
| 51
|
py
|
Python
|
enthought/pyface/ui/null/widget.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/pyface/ui/null/widget.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/pyface/ui/null/widget.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from pyface.ui.null.widget import *
| 17
| 35
| 0.764706
| 8
| 51
| 4.875
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 51
| 2
| 36
| 25.5
| 0.886364
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b09b7c0697991044dd40bc14c2c5d1026788f7a9
| 201
|
py
|
Python
|
example/argparse_grin.py
|
sergei-dyshel/genzshcomp
|
8fb655070d03876f4f2fc0d84e6ba6e48dc19b76
|
[
"BSD-3-Clause"
] | null | null | null |
example/argparse_grin.py
|
sergei-dyshel/genzshcomp
|
8fb655070d03876f4f2fc0d84e6ba6e48dc19b76
|
[
"BSD-3-Clause"
] | null | null | null |
example/argparse_grin.py
|
sergei-dyshel/genzshcomp
|
8fb655070d03876f4f2fc0d84e6ba6e48dc19b76
|
[
"BSD-3-Clause"
] | null | null | null |
from grin import get_grin_arg_parser
from genzshcomp import CompletionGenerator
if __name__ == '__main__':
generator = CompletionGenerator("grin", get_grin_arg_parser())
print generator.get()
| 28.714286
| 66
| 0.78607
| 24
| 201
| 6
| 0.541667
| 0.097222
| 0.138889
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134328
| 201
| 6
| 67
| 33.5
| 0.827586
| 0
| 0
| 0
| 0
| 0
| 0.059701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.4
| null | null | 0.2
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b0d45c003f7f25b749c9c6062913e453f971618f
| 309
|
py
|
Python
|
Latest/venv/Lib/site-packages/apptools/selection/api.py
|
adamcvj/SatelliteTracker
|
49a8f26804422fdad6f330a5548e9f283d84a55d
|
[
"Apache-2.0"
] | 1
|
2022-01-09T20:04:31.000Z
|
2022-01-09T20:04:31.000Z
|
Latest/venv/Lib/site-packages/apptools/selection/api.py
|
adamcvj/SatelliteTracker
|
49a8f26804422fdad6f330a5548e9f283d84a55d
|
[
"Apache-2.0"
] | 1
|
2022-02-15T12:01:57.000Z
|
2022-03-24T19:48:47.000Z
|
Latest/venv/Lib/site-packages/apptools/selection/api.py
|
adamcvj/SatelliteTracker
|
49a8f26804422fdad6f330a5548e9f283d84a55d
|
[
"Apache-2.0"
] | null | null | null |
from .errors import (IDConflictError, ListenerNotConnectedError,
ProviderNotRegisteredError)
from .i_selection import ISelection, IListSelection
from .i_selection_provider import ISelectionProvider
from .list_selection import ListSelection
from .selection_service import SelectionService
| 44.142857
| 64
| 0.825243
| 28
| 309
| 8.928571
| 0.571429
| 0.04
| 0.112
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142395
| 309
| 6
| 65
| 51.5
| 0.943396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.833333
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b0fa95f5e1c46b524a3c4efc424dfe6628a465c5
| 167
|
py
|
Python
|
kovot/__init__.py
|
kazh98/kovot
|
0d17ff238f362e647be312fc758ef7634e581141
|
[
"MIT"
] | 5
|
2015-06-10T20:26:04.000Z
|
2019-02-06T04:52:53.000Z
|
kovot/__init__.py
|
kazh98/kovot
|
0d17ff238f362e647be312fc758ef7634e581141
|
[
"MIT"
] | 7
|
2018-08-26T14:08:13.000Z
|
2019-01-08T13:34:32.000Z
|
kovot/__init__.py
|
kazh98/kovot
|
0d17ff238f362e647be312fc758ef7634e581141
|
[
"MIT"
] | 1
|
2018-07-16T09:35:59.000Z
|
2018-07-16T09:35:59.000Z
|
# coding: utf-8
from .bot import Bot
from .message import Message
from .response import Response
from .speaker import Speaker
from .remote_mod import RemoteCallerMod
| 20.875
| 39
| 0.808383
| 24
| 167
| 5.583333
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006993
| 0.143713
| 167
| 8
| 39
| 20.875
| 0.93007
| 0.077844
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b0fd201e11ac42f6d392e661c1e5b7b04529e4ab
| 30
|
py
|
Python
|
classifier/__init__.py
|
PrasanthChettri/GenderClassifierNN
|
e63da7ff9c73ab625a49a6bf65b12aef8f8bcda0
|
[
"MIT"
] | 4
|
2021-03-22T14:40:01.000Z
|
2021-03-23T09:10:23.000Z
|
classifier/__init__.py
|
PurpleText/GenderClassifierNN
|
595623aedf05c89fc44e6af1ce5ef68b2148a42a
|
[
"MIT"
] | null | null | null |
classifier/__init__.py
|
PurpleText/GenderClassifierNN
|
595623aedf05c89fc44e6af1ce5ef68b2148a42a
|
[
"MIT"
] | null | null | null |
#File intentionally left blank
| 30
| 30
| 0.866667
| 4
| 30
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 30
| 1
| 30
| 30
| 0.962963
| 0.966667
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9fd117b0314b6dc2116a03aab9ac2e035c4dba9d
| 331
|
py
|
Python
|
tests/testapp/views/authorization.py
|
theY4Kman/pytest-drf
|
1c86e023ebe7353e89aa71a7fc3b15457b5b20bd
|
[
"MIT"
] | 58
|
2020-02-09T07:13:57.000Z
|
2021-12-06T10:00:15.000Z
|
tests/testapp/views/authorization.py
|
theY4Kman/pytest-drf
|
1c86e023ebe7353e89aa71a7fc3b15457b5b20bd
|
[
"MIT"
] | 10
|
2020-07-27T09:21:51.000Z
|
2021-09-11T20:14:45.000Z
|
tests/testapp/views/authorization.py
|
theY4Kman/pytest-drf
|
1c86e023ebe7353e89aa71a7fc3b15457b5b20bd
|
[
"MIT"
] | 5
|
2020-07-27T08:39:48.000Z
|
2021-12-26T07:08:55.000Z
|
from rest_framework import permissions
from rest_framework.decorators import api_view, permission_classes
from rest_framework.request import Request
from rest_framework.response import Response
@api_view()
@permission_classes([permissions.IsAuthenticated])
def login_required(request: Request) -> Response:
return Response()
| 30.090909
| 66
| 0.839879
| 40
| 331
| 6.725
| 0.425
| 0.118959
| 0.252788
| 0.178439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096677
| 331
| 10
| 67
| 33.1
| 0.899666
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.5
| 0.125
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
9fe17499ba0eb4da2b2c228c7a1963e7a2de2e40
| 79
|
py
|
Python
|
apps/ImageSearch/algs/LassoLinear/__init__.py
|
AmFamMLTeam/NEXT
|
2b604230395be1b98e84115c20b5f509d5f24411
|
[
"Apache-2.0"
] | 2
|
2020-11-16T17:01:36.000Z
|
2022-03-04T17:07:59.000Z
|
apps/ImageSearch/algs/LassoLinear/__init__.py
|
AmFamMLTeam/NEXT
|
2b604230395be1b98e84115c20b5f509d5f24411
|
[
"Apache-2.0"
] | null | null | null |
apps/ImageSearch/algs/LassoLinear/__init__.py
|
AmFamMLTeam/NEXT
|
2b604230395be1b98e84115c20b5f509d5f24411
|
[
"Apache-2.0"
] | null | null | null |
from apps.ImageSearch.algs.LassoLinear.LassoLinear import LassoLinear as MyAlg
| 39.5
| 78
| 0.873418
| 10
| 79
| 6.9
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075949
| 79
| 1
| 79
| 79
| 0.945205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9ffa0de8d3b506048db6f1621202bd17a6a7cf08
| 193
|
py
|
Python
|
newsletter/models.py
|
Tobi-De/devblog
|
341458ecf427069b5fbb4dcbe78ec3664f75d072
|
[
"MIT"
] | null | null | null |
newsletter/models.py
|
Tobi-De/devblog
|
341458ecf427069b5fbb4dcbe78ec3664f75d072
|
[
"MIT"
] | null | null | null |
newsletter/models.py
|
Tobi-De/devblog
|
341458ecf427069b5fbb4dcbe78ec3664f75d072
|
[
"MIT"
] | null | null | null |
from django.db import models
from model_utils.models import TimeStampedModel
class Signup(TimeStampedModel):
email = models.EmailField()
def __str__(self):
return self.email
| 19.3
| 47
| 0.746114
| 23
| 193
| 6.043478
| 0.695652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186529
| 193
| 9
| 48
| 21.444444
| 0.88535
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0.166667
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
b02a69fd4f027efa0c54892d954f8369fc0ec2d9
| 1,047
|
py
|
Python
|
tests/handler.py
|
josephsalimin/flask-router-wrapper
|
cc6414c82a5297d88decf3e587699de7298b3a97
|
[
"MIT"
] | 6
|
2020-08-01T12:39:17.000Z
|
2020-08-13T14:00:45.000Z
|
tests/handler.py
|
josephsalimin/flask-router-wrapper
|
cc6414c82a5297d88decf3e587699de7298b3a97
|
[
"MIT"
] | 2
|
2021-03-30T01:13:33.000Z
|
2021-06-02T02:47:58.000Z
|
tests/handler.py
|
josephsalimin/flask-router-wrapper
|
cc6414c82a5297d88decf3e587699de7298b3a97
|
[
"MIT"
] | null | null | null |
from flask import g, jsonify
from flask_router_wrapper import Middleware
class SetValueMiddleware(Middleware):
def _exec(self, next_function, *args, **kwargs):
g.val = 0
return next_function(*args, **kwargs)
class IncrementValueMiddleware(Middleware):
def _exec(self, next_function, *args, **kwargs):
g.val += 1
return next_function(*args, **kwargs)
class SetValueCallable:
def __call__(self, next_function, *args, **kwargs):
g.val = 0
return next_function(*args, **kwargs)
class IncrementValueCallable:
def __call__(self, next_function, *args, **kwargs):
g.val += 1
return next_function(*args, **kwargs)
class NotCallableMiddleware:
pass
def set_value_middleware(next_function, *args, **kwargs):
g.val = 0
return next_function(*args, **kwargs)
def increment_value_middleware(next_function, *args, **kwargs):
g.val += 1
return next_function(*args, **kwargs)
def index_handler():
return jsonify({"message": "hello"})
def value_json_handler():
return jsonify({"value": g.val})
| 21.367347
| 63
| 0.711557
| 132
| 1,047
| 5.409091
| 0.265152
| 0.201681
| 0.268908
| 0.369748
| 0.630252
| 0.630252
| 0.621849
| 0.621849
| 0.579832
| 0.560224
| 0
| 0.00681
| 0.158548
| 1,047
| 48
| 64
| 21.8125
| 0.803632
| 0
| 0
| 0.533333
| 0
| 0
| 0.016237
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.266667
| false
| 0.033333
| 0.066667
| 0.066667
| 0.766667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
b04219a045d5f9eaea190acb5e77a5c888632bf7
| 5,601
|
py
|
Python
|
sdk/turing/generated/models/__init__.py
|
LeonLnj/turing
|
93817f5cfb40d056a707bd85e9265b5cafdaeb94
|
[
"Apache-2.0"
] | null | null | null |
sdk/turing/generated/models/__init__.py
|
LeonLnj/turing
|
93817f5cfb40d056a707bd85e9265b5cafdaeb94
|
[
"Apache-2.0"
] | null | null | null |
sdk/turing/generated/models/__init__.py
|
LeonLnj/turing
|
93817f5cfb40d056a707bd85e9265b5cafdaeb94
|
[
"Apache-2.0"
] | null | null | null |
# flake8: noqa
# import all models into this package
# if you have many models here with many references from one model to another this may
# raise a RecursionError
# to avoid this, import only the models that you directly need like:
# from from turing.generated.model.pet import Pet
# or import this package, but before doing it, use:
# import sys
# sys.setrecursionlimit(n)
from turing.generated.model.big_query_config import BigQueryConfig
from turing.generated.model.big_query_dataset import BigQueryDataset
from turing.generated.model.big_query_dataset_all_of import BigQueryDatasetAllOf
from turing.generated.model.big_query_dataset_config import BigQueryDatasetConfig
from turing.generated.model.big_query_sink import BigQuerySink
from turing.generated.model.big_query_sink_all_of import BigQuerySinkAllOf
from turing.generated.model.big_query_sink_config import BigQuerySinkConfig
from turing.generated.model.dataset import Dataset
from turing.generated.model.enricher import Enricher
from turing.generated.model.ensembler import Ensembler
from turing.generated.model.ensembler_config import EnsemblerConfig
from turing.generated.model.ensembler_config_kind import EnsemblerConfigKind
from turing.generated.model.ensembler_docker_config import EnsemblerDockerConfig
from turing.generated.model.ensembler_infra_config import EnsemblerInfraConfig
from turing.generated.model.ensembler_job_status import EnsemblerJobStatus
from turing.generated.model.ensembler_pyfunc_config import EnsemblerPyfuncConfig
from turing.generated.model.ensembler_standard_config import EnsemblerStandardConfig
from turing.generated.model.ensembler_standard_config_experiment_mappings import EnsemblerStandardConfigExperimentMappings
from turing.generated.model.ensembler_type import EnsemblerType
from turing.generated.model.ensemblers_paginated_results import EnsemblersPaginatedResults
from turing.generated.model.ensemblers_paginated_results_all_of import EnsemblersPaginatedResultsAllOf
from turing.generated.model.ensemblers_paginated_results_all_of1 import EnsemblersPaginatedResultsAllOf1
from turing.generated.model.ensembling_job import EnsemblingJob
from turing.generated.model.ensembling_job_ensembler_spec import EnsemblingJobEnsemblerSpec
from turing.generated.model.ensembling_job_ensembler_spec_result import EnsemblingJobEnsemblerSpecResult
from turing.generated.model.ensembling_job_meta import EnsemblingJobMeta
from turing.generated.model.ensembling_job_paginated_results import EnsemblingJobPaginatedResults
from turing.generated.model.ensembling_job_paginated_results_all_of import EnsemblingJobPaginatedResultsAllOf
from turing.generated.model.ensembling_job_prediction_source import EnsemblingJobPredictionSource
from turing.generated.model.ensembling_job_prediction_source_all_of import EnsemblingJobPredictionSourceAllOf
from turing.generated.model.ensembling_job_result_type import EnsemblingJobResultType
from turing.generated.model.ensembling_job_sink import EnsemblingJobSink
from turing.generated.model.ensembling_job_source import EnsemblingJobSource
from turing.generated.model.ensembling_job_spec import EnsemblingJobSpec
from turing.generated.model.ensembling_resources import EnsemblingResources
from turing.generated.model.env_var import EnvVar
from turing.generated.model.event import Event
from turing.generated.model.experiment_config import ExperimentConfig
from turing.generated.model.field_source import FieldSource
from turing.generated.model.generic_dataset import GenericDataset
from turing.generated.model.generic_ensembler import GenericEnsembler
from turing.generated.model.generic_sink import GenericSink
from turing.generated.model.id_object import IdObject
from turing.generated.model.job_id import JobId
from turing.generated.model.kafka_config import KafkaConfig
from turing.generated.model.label import Label
from turing.generated.model.log_level import LogLevel
from turing.generated.model.pagination_paging import PaginationPaging
from turing.generated.model.project import Project
from turing.generated.model.py_func_ensembler import PyFuncEnsembler
from turing.generated.model.py_func_ensembler_all_of import PyFuncEnsemblerAllOf
from turing.generated.model.resource_request import ResourceRequest
from turing.generated.model.result_logger_type import ResultLoggerType
from turing.generated.model.route import Route
from turing.generated.model.router import Router
from turing.generated.model.router_config import RouterConfig
from turing.generated.model.router_config_config import RouterConfigConfig
from turing.generated.model.router_config_config_log_config import RouterConfigConfigLogConfig
from turing.generated.model.router_details import RouterDetails
from turing.generated.model.router_details_all_of import RouterDetailsAllOf
from turing.generated.model.router_ensembler_config import RouterEnsemblerConfig
from turing.generated.model.router_events import RouterEvents
from turing.generated.model.router_id import RouterId
from turing.generated.model.router_id_and_version import RouterIdAndVersion
from turing.generated.model.router_id_object import RouterIdObject
from turing.generated.model.router_status import RouterStatus
from turing.generated.model.router_version import RouterVersion
from turing.generated.model.router_version_log_config import RouterVersionLogConfig
from turing.generated.model.router_version_status import RouterVersionStatus
from turing.generated.model.save_mode import SaveMode
from turing.generated.model.traffic_rule import TrafficRule
from turing.generated.model.traffic_rule_condition import TrafficRuleCondition
| 66.678571
| 122
| 0.894305
| 700
| 5,601
| 6.962857
| 0.237143
| 0.149774
| 0.284571
| 0.359458
| 0.437423
| 0.342224
| 0.194707
| 0.085761
| 0
| 0
| 0
| 0.000572
| 0.063382
| 5,601
| 83
| 123
| 67.481928
| 0.928517
| 0.063739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b0539649ebd1baad412883f086136b3490c927c3
| 230
|
py
|
Python
|
rockit/plugins/picamera/views.py
|
acreations/rockit-server
|
4d1e87b563d9339e73bf0e5c698a59e8e124cc01
|
[
"MIT"
] | null | null | null |
rockit/plugins/picamera/views.py
|
acreations/rockit-server
|
4d1e87b563d9339e73bf0e5c698a59e8e124cc01
|
[
"MIT"
] | null | null | null |
rockit/plugins/picamera/views.py
|
acreations/rockit-server
|
4d1e87b563d9339e73bf0e5c698a59e8e124cc01
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render_to_response
from django.template import RequestContext
def partials_settings(request):
return render_to_response('partials/settings-picamera.html', context_instance=RequestContext(request))
| 46
| 106
| 0.856522
| 28
| 230
| 6.821429
| 0.642857
| 0.104712
| 0.167539
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073913
| 230
| 5
| 106
| 46
| 0.896714
| 0
| 0
| 0
| 0
| 0
| 0.134199
| 0.134199
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
c6609321d889824da523df62d4756990aead374f
| 5,964
|
py
|
Python
|
evm_gm_tool/tool/forms.py
|
manhnd1112/GR
|
2ee9da122afeb33b3ee589a7f64d3f74d2654a1a
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
evm_gm_tool/tool/forms.py
|
manhnd1112/GR
|
2ee9da122afeb33b3ee589a7f64d3f74d2654a1a
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 11
|
2020-03-24T15:46:05.000Z
|
2022-03-11T23:20:58.000Z
|
evm_gm_tool/tool/forms.py
|
manhnd1112/GR
|
2ee9da122afeb33b3ee589a7f64d3f74d2654a1a
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
from .models import User, Project, ProjectMember, UserProfile
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm, PasswordChangeForm
from django.contrib.auth.forms import AuthenticationForm
class CustomAuthForm(AuthenticationForm):
username = forms.CharField(widget=forms.TextInput(
attrs={
'class':'validate',
'placeholder': 'Username'
}
))
password = forms.CharField(widget=forms.PasswordInput(
attrs={
'placeholder':'Password'
}
))
class UserCreateForm(UserCreationForm):
email = forms.CharField(widget=forms.EmailInput(
attrs={'class': 'validate form-control'}
))
username = forms.CharField(widget=forms.TextInput(
attrs={
'class': 'form-control'
}
))
first_name = forms.CharField(widget=forms.TextInput(
attrs={
'class': 'form-control'
}
))
last_name = forms.CharField(widget=forms.TextInput(
attrs={
'class': 'form-control'
}
))
password1 = forms.CharField(widget=forms.PasswordInput(
attrs={
'class': 'form-control'
}
), label='Password')
password2 = forms.CharField(widget=forms.PasswordInput(
attrs={
'class': 'form-control'
}
), label='Password Confirmation')
class Meta:
model = User
fields = (
'email',
'username',
'first_name',
'last_name',
'password1',
'password2',
'is_superuser',
'is_active'
)
labels = {
'is_superuser': 'Is Admin',
}
#exclude = ()
class UserEditForm(UserChangeForm):
email = forms.CharField(widget=forms.EmailInput(
attrs={'class': 'validate form-control'}
))
username = forms.CharField(widget=forms.TextInput(
attrs={
'class': 'form-control'
}
))
first_name = forms.CharField(widget=forms.TextInput(
attrs={
'class': 'form-control'
}
))
last_name = forms.CharField(widget=forms.TextInput(
attrs={
'class': 'form-control'
}
))
class Meta:
model = User
fields = (
'email',
'username',
'first_name',
'last_name',
'is_superuser',
'is_active',
'password'
)
labels = {
'is_superuser': 'Is Admin',
}
class ProjectCreationForm(forms.ModelForm):
name = forms.CharField(widget=forms.TextInput(
attrs={'class': 'validate form-control'}
))
desc = forms.CharField(widget=forms.Textarea(
attrs={
'class': 'form-control'
}
), required=False, label="Description")
budget = forms.FloatField(widget=forms.NumberInput(
attrs={
'class': 'form-control',
'step': 'any'
}
))
pd = forms.FloatField(widget=forms.NumberInput(
attrs={
'class': 'form-control',
'step': 'any'
}
), label="Project Duration")
class Meta:
model = Project
fields = (
'id',
'name',
'desc',
'status',
'budget',
'pd',
'owner'
)
labels = {
'desc': 'Description'
}
class ProjectEditForm(forms.ModelForm):
id = forms.IntegerField(widget=forms.NumberInput(
attrs={
'class': 'form-control'
}
))
name = forms.CharField(widget=forms.TextInput(
attrs={'class': 'validate form-control'}
))
desc = forms.CharField(widget=forms.Textarea(
attrs={
'class': 'form-control'
}
), required=False, label="Description")
budget = forms.FloatField(widget=forms.NumberInput(
attrs={
'class': 'form-control',
'step': 'any'
}
))
pd = forms.FloatField(widget=forms.NumberInput(
attrs={
'class': 'form-control',
'step': 'any'
}
), label="Planned Duration")
class Meta:
model = Project
fields = (
'id',
'name',
'desc',
'status',
'budget',
'pd',
'owner'
)
labels = {
'desc': 'Description'
}
class ProjectViewForm(forms.ModelForm):
class Meta:
model = Project
fields = (
'id',
'name',
'desc',
'status',
'budget',
'pd',
'owner'
)
labels = {
'desc': 'Description',
'pd': 'Project Duration'
}
class EditProfileForm(forms.ModelForm):
role = forms.CharField(widget=forms.TextInput(
attrs={
'class': 'form-control role'
}
), required=False, label='Work as')
desc = forms.CharField(widget=forms.Textarea(
attrs={
'class': 'form-control desc'
}
), required=False, label='Bio')
class Meta:
model = UserProfile
fields = (
'avatar',
'role',
'desc'
)
labels = {
'role': 'Work as'
}
class ChangePasswordForm(PasswordChangeForm):
old_password = forms.CharField(widget=forms.PasswordInput(
attrs={
'class': 'form-control'
}
), label="Old password")
new_password1 = forms.CharField(widget=forms.PasswordInput(
attrs={
'class': 'form-control'
}
), label="New password")
new_password2 = forms.CharField(widget=forms.PasswordInput(
attrs={
'class': 'form-control'
}
), label="New password confirmation")
| 24.243902
| 90
| 0.504359
| 478
| 5,964
| 6.257322
| 0.16318
| 0.09562
| 0.140421
| 0.175527
| 0.754931
| 0.738883
| 0.717486
| 0.683383
| 0.663323
| 0.644935
| 0
| 0.001591
| 0.367706
| 5,964
| 246
| 91
| 24.243902
| 0.791567
| 0.002012
| 0
| 0.638009
| 0
| 0
| 0.171736
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.076923
| 0.0181
| 0
| 0.199095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
c66ee2c17ceba0576af01fb07a9029c902e624e3
| 51
|
py
|
Python
|
logging_logger/__init__.py
|
payalkutana/logging_logger
|
7dc7ccaee63e922b7c7f699a5d0b4e1317a27502
|
[
"MIT"
] | null | null | null |
logging_logger/__init__.py
|
payalkutana/logging_logger
|
7dc7ccaee63e922b7c7f699a5d0b4e1317a27502
|
[
"MIT"
] | null | null | null |
logging_logger/__init__.py
|
payalkutana/logging_logger
|
7dc7ccaee63e922b7c7f699a5d0b4e1317a27502
|
[
"MIT"
] | null | null | null |
from logging_logger.loggerClass import loggerClass
| 25.5
| 50
| 0.901961
| 6
| 51
| 7.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 51
| 1
| 51
| 51
| 0.957447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c67db61cde8f72abdc0a2d00a7534d1e464f0de6
| 48
|
py
|
Python
|
runtimes/zoo.py
|
NateFerrero/zoo
|
744fd811e87fa6ae9857e930df5cf13574934be0
|
[
"MIT"
] | null | null | null |
runtimes/zoo.py
|
NateFerrero/zoo
|
744fd811e87fa6ae9857e930df5cf13574934be0
|
[
"MIT"
] | 1
|
2015-02-17T11:11:20.000Z
|
2015-02-17T11:11:20.000Z
|
runtimes/zoo.py
|
NateFerrero/zoo
|
744fd811e87fa6ae9857e930df5cf13574934be0
|
[
"MIT"
] | null | null | null |
# Python Runtime for Zoo
# @author Nate Ferrero
| 16
| 24
| 0.75
| 7
| 48
| 5.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 48
| 2
| 25
| 24
| 0.923077
| 0.895833
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c6caaa8689239304955d878d8be7ab415a622db3
| 161
|
py
|
Python
|
logger/log_context.py
|
Cogno-Marco/YetAnotherPyLogger
|
2b07d2eb064106258056602f466bb636f3cd08d1
|
[
"MIT"
] | 2
|
2020-10-01T18:52:36.000Z
|
2021-03-25T20:40:21.000Z
|
logger/log_context.py
|
Cogno-Marco/YetAnotherPyLogger
|
2b07d2eb064106258056602f466bb636f3cd08d1
|
[
"MIT"
] | 2
|
2020-10-01T18:43:34.000Z
|
2020-10-01T19:43:15.000Z
|
logger/log_context.py
|
Cogno-Marco/YetAnotherPyLogger
|
2b07d2eb064106258056602f466bb636f3cd08d1
|
[
"MIT"
] | 2
|
2020-10-01T13:22:40.000Z
|
2020-10-01T17:47:35.000Z
|
# proof of concept: minimal pluggable logging facade
def set_logger(custom_log):
    """Register *custom_log* as the global logger.

    *custom_log* must expose a ``log(text)`` method.
    """
    # Stored as a function attribute (as in the original design); a plain
    # single-underscore name avoids the confusing double-underscore prefix.
    set_logger._custom_logger = custom_log


def log(text):
    """Forward *text* to the logger registered via :func:`set_logger`.

    Raises:
        RuntimeError: if no logger has been registered yet (the original
            code raised an opaque AttributeError in this case).
    """
    logger = getattr(set_logger, '_custom_logger', None)
    if logger is None:
        raise RuntimeError('no logger registered; call set_logger() first')
    logger.log(text)
| 14.636364
| 43
| 0.708075
| 23
| 161
| 4.478261
| 0.391304
| 0.466019
| 0.436893
| 0.407767
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21118
| 161
| 10
| 44
| 16.1
| 0.811024
| 0.099379
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c6d3e554ee3766db5c95a0f517b14fcc50a8a0bd
| 157
|
py
|
Python
|
math/abstract-algebra/2.py
|
admariner/playground
|
02a3104472c8fa3589fe87f7265e70c61d5728c7
|
[
"MIT"
] | 3
|
2021-06-12T04:42:32.000Z
|
2021-06-24T13:57:38.000Z
|
math/abstract-algebra/2.py
|
admariner/playground
|
02a3104472c8fa3589fe87f7265e70c61d5728c7
|
[
"MIT"
] | null | null | null |
math/abstract-algebra/2.py
|
admariner/playground
|
02a3104472c8fa3589fe87f7265e70c61d5728c7
|
[
"MIT"
] | 1
|
2021-08-19T14:57:17.000Z
|
2021-08-19T14:57:17.000Z
|
# Semigroup + identity property = Monoid
# Adding the identity element 0 leaves any number unchanged.
identity_holds = (2 + 0 == 2)
print(identity_holds)
# Monoids don't have to be numbers
| 26.166667
| 61
| 0.726115
| 26
| 157
| 4.384615
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032
| 0.203822
| 157
| 6
| 62
| 26.166667
| 0.88
| 0.834395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
c6d6f06dfcb0ee3f3b342e6a2e0c9cb58c00ffa0
| 97
|
py
|
Python
|
tests/integration/lambdas/python3/lambda2/handler2.py
|
ninhkd/localstack
|
9a415e2067f6fafa3cdc9dd84f5b491b0b2a2acd
|
[
"Apache-2.0"
] | 31,928
|
2017-07-04T03:06:28.000Z
|
2022-03-31T22:33:27.000Z
|
tests/integration/lambdas/python3/lambda2/handler2.py
|
ninhkd/localstack
|
9a415e2067f6fafa3cdc9dd84f5b491b0b2a2acd
|
[
"Apache-2.0"
] | 5,216
|
2017-07-04T11:45:41.000Z
|
2022-03-31T22:02:14.000Z
|
tests/integration/lambdas/python3/lambda2/handler2.py
|
ninhkd/localstack
|
9a415e2067f6fafa3cdc9dd84f5b491b0b2a2acd
|
[
"Apache-2.0"
] | 3,056
|
2017-06-05T13:29:11.000Z
|
2022-03-31T20:54:43.000Z
|
# Lambda handler module: reads SETTING2 from the deployed `settings`
# module once at import time.
import settings

# Snapshot taken at import; handler returns this value on every call.
constant = settings.SETTING2


def handler(event, context):
    """Lambda-style entry point: ignores `event`/`context` and returns
    the SETTING2 value captured at module import time."""
    return constant
| 12.125
| 28
| 0.762887
| 11
| 97
| 6.727273
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0125
| 0.175258
| 97
| 7
| 29
| 13.857143
| 0.9125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
05c899afb7f4d01f6743efb2cd33ac980bca2ae3
| 132
|
py
|
Python
|
01-introductory-problems/10-trailing-zeros.py
|
hamza1886/cses
|
11f50ed6b10b50da975087e0f21400f7f484600f
|
[
"MIT"
] | null | null | null |
01-introductory-problems/10-trailing-zeros.py
|
hamza1886/cses
|
11f50ed6b10b50da975087e0f21400f7f484600f
|
[
"MIT"
] | null | null | null |
01-introductory-problems/10-trailing-zeros.py
|
hamza1886/cses
|
11f50ed6b10b50da975087e0f21400f7f484600f
|
[
"MIT"
] | null | null | null |
# https://cses.fi/problemset/task/1618
def zeros(n):
    """Return the number of trailing zeros in n! (Legendre's formula:
    sum of n//5 + n//25 + n//125 + ...)."""
    count = 0
    while n >= 5:
        n //= 5
        count += n
    return count
print(zeros(int(input())))
| 18.857143
| 49
| 0.606061
| 24
| 132
| 3.333333
| 0.708333
| 0.075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074766
| 0.189394
| 132
| 6
| 50
| 22
| 0.672897
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
05edd66133bf5e17da1fa10af67593270bba3f26
| 80
|
py
|
Python
|
ait/core/server/plugins/__init__.py
|
bruceplai/AIT-Core
|
3289f48b17842c48b47cd779e3d983404f951a64
|
[
"MIT"
] | 32
|
2018-06-06T16:55:25.000Z
|
2022-03-16T00:43:21.000Z
|
ait/core/server/plugins/__init__.py
|
bruceplai/AIT-Core
|
3289f48b17842c48b47cd779e3d983404f951a64
|
[
"MIT"
] | 325
|
2018-04-09T15:25:18.000Z
|
2022-03-31T18:58:15.000Z
|
ait/core/server/plugins/__init__.py
|
bruceplai/AIT-Core
|
3289f48b17842c48b47cd779e3d983404f951a64
|
[
"MIT"
] | 31
|
2018-07-19T19:20:10.000Z
|
2022-03-16T00:42:19.000Z
|
from .data_archive import *
from .limit_monitor import *
from .openmct import *
| 20
| 28
| 0.775
| 11
| 80
| 5.454545
| 0.636364
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 80
| 3
| 29
| 26.666667
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
af55870589c380a1c48426112721d7f7a095b73c
| 56
|
py
|
Python
|
examples/python/celery/demo/backend.py
|
DiceTechnology/react-native-sentry
|
f44bd0678a6cdac0b07f5112d62bd784a39b7dae
|
[
"MIT"
] | 99
|
2017-08-04T22:47:20.000Z
|
2022-03-29T03:49:23.000Z
|
examples/python/celery/demo/backend.py
|
DiceTechnology/react-native-sentry
|
f44bd0678a6cdac0b07f5112d62bd784a39b7dae
|
[
"MIT"
] | 55
|
2017-10-12T16:13:22.000Z
|
2022-03-09T20:17:16.000Z
|
examples/python/celery/demo/backend.py
|
DiceTechnology/react-native-sentry
|
f44bd0678a6cdac0b07f5112d62bd784a39b7dae
|
[
"MIT"
] | 103
|
2017-09-06T12:21:18.000Z
|
2022-03-27T11:31:14.000Z
|
import importlib

# `async` became a reserved keyword in Python 3.7, so the original
# `from settings.async import celery_app` is a SyntaxError on modern
# interpreters. Import the module dynamically instead; behavior is
# unchanged where the old form worked.
_async_settings = importlib.import_module("settings.async")
celery_app = _async_settings.celery_app

app = celery_app
| 14
| 37
| 0.821429
| 9
| 56
| 4.888889
| 0.666667
| 0.409091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 56
| 3
| 38
| 18.666667
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
af6df237b8b4ea86f1d5fb9f9498689132dab6ab
| 121
|
py
|
Python
|
app/version2/models/office.py
|
lizwkariuki58/politico-api
|
da84c013648fb1e485572a3f1c0b18bd65d1be99
|
[
"MIT"
] | null | null | null |
app/version2/models/office.py
|
lizwkariuki58/politico-api
|
da84c013648fb1e485572a3f1c0b18bd65d1be99
|
[
"MIT"
] | null | null | null |
app/version2/models/office.py
|
lizwkariuki58/politico-api
|
da84c013648fb1e485572a3f1c0b18bd65d1be99
|
[
"MIT"
] | null | null | null |
from flask import request
from app.database_config import init_db
from app.version2.models.base_model import BaseModel
| 20.166667
| 52
| 0.85124
| 19
| 121
| 5.263158
| 0.736842
| 0.14
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009346
| 0.115702
| 121
| 5
| 53
| 24.2
| 0.925234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
afa25c94ea2830cf3c98e0e042f4af5237d1708b
| 51
|
py
|
Python
|
examples/DecryptLoginExamples/crawlers/taobaosnap/__init__.py
|
hedou/DecryptLogin
|
ff86a5d378c8a42d1caebbb7482658a95053f716
|
[
"Apache-2.0"
] | null | null | null |
examples/DecryptLoginExamples/crawlers/taobaosnap/__init__.py
|
hedou/DecryptLogin
|
ff86a5d378c8a42d1caebbb7482658a95053f716
|
[
"Apache-2.0"
] | null | null | null |
examples/DecryptLoginExamples/crawlers/taobaosnap/__init__.py
|
hedou/DecryptLogin
|
ff86a5d378c8a42d1caebbb7482658a95053f716
|
[
"Apache-2.0"
] | null | null | null |
'''initialize'''
from .taobaosnap import TaobaoSnap
| 25.5
| 34
| 0.784314
| 5
| 51
| 8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 51
| 2
| 34
| 25.5
| 0.851064
| 0.196078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
afbe37ddd63c9260e16857468055dda564c4f31d
| 71
|
py
|
Python
|
bolt/cogs/botlog/__init__.py
|
HeavyLobster/bolt
|
d50dd59d119a627bfac32fbed2544433f066399b
|
[
"0BSD"
] | null | null | null |
bolt/cogs/botlog/__init__.py
|
HeavyLobster/bolt
|
d50dd59d119a627bfac32fbed2544433f066399b
|
[
"0BSD"
] | null | null | null |
bolt/cogs/botlog/__init__.py
|
HeavyLobster/bolt
|
d50dd59d119a627bfac32fbed2544433f066399b
|
[
"0BSD"
] | null | null | null |
from .cog import BotLog
def setup(bot):
    """Extension entry point: attach the BotLog cog to *bot*."""
    cog = BotLog(bot)
    bot.add_cog(cog)
| 11.833333
| 28
| 0.690141
| 12
| 71
| 4
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183099
| 71
| 5
| 29
| 14.2
| 0.827586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bb637c76e2a06022c58d486ce31ce8542dda4d95
| 5,866
|
py
|
Python
|
tests/fake_api/jobs/test_trace_patching.py
|
chrisburr/gitlab-runner-api
|
b6127c337b742f514e9ba1cd0c25e4284c7e83e1
|
[
"MIT"
] | 1
|
2022-03-27T15:27:32.000Z
|
2022-03-27T15:27:32.000Z
|
tests/fake_api/jobs/test_trace_patching.py
|
chrisburr/gitlab-runner-api
|
b6127c337b742f514e9ba1cd0c25e4284c7e83e1
|
[
"MIT"
] | null | null | null |
tests/fake_api/jobs/test_trace_patching.py
|
chrisburr/gitlab-runner-api
|
b6127c337b742f514e9ba1cd0c25e4284c7e83e1
|
[
"MIT"
] | 1
|
2021-02-06T02:18:34.000Z
|
2021-02-06T02:18:34.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import requests
from gitlab_runner_api.testing import API_ENDPOINT, FakeGitlabAPI, test_log
gitlab_api = FakeGitlabAPI()
@gitlab_api.use(n_runners=2, n_pending=3, n_running=2, n_success=4, n_failed=1)
def test(gitlab_api):
    """Happy path: patch one running job's trace in three consecutive
    chunks (0-1000, 1000-1300, 1300-end) and check the fake API
    accumulates the full log while every other job's log stays empty."""
    expected_job = gitlab_api.running_jobs[1]
    # Initial log update
    headers = {
        "JOB-TOKEN": expected_job.token,
        "Content-Range": "0-" + str(len(test_log[:1000])),
    }
    response = requests.patch(
        API_ENDPOINT + "/jobs/" + expected_job.id + "/trace",
        test_log[:1000],
        headers=headers,
    )
    # Check the response: 202 and the accepted byte range are echoed back
    assert response.status_code == 202
    assert response.content.decode() == "0-" + str(len(test_log[:1000]))
    # Check the API's internal state
    assert gitlab_api.running_jobs[1].log == test_log[:1000]
    # Later log update
    headers = {
        "JOB-TOKEN": expected_job.token,
        "Content-Range": "1000-" + str(len(test_log[1000:1300])),
    }
    response = requests.patch(
        API_ENDPOINT + "/jobs/" + expected_job.id + "/trace",
        test_log[1000:1300],
        headers=headers,
    )
    # Check the response
    assert response.status_code == 202
    assert response.content.decode() == "0-1300"
    # Check the API's internal state
    assert gitlab_api.running_jobs[1].log == test_log[:1300]
    # And a final log update
    headers = {
        "JOB-TOKEN": expected_job.token,
        "Content-Range": "1300-" + str(len(test_log[1300:])),
    }
    response = requests.patch(
        API_ENDPOINT + "/jobs/" + expected_job.id + "/trace",
        test_log[1300:],
        headers=headers,
    )
    # Check the response
    assert response.status_code == 202
    assert response.content.decode() == "0-" + str(len(test_log))
    # Check the API's internal state
    assert gitlab_api.running_jobs[1].log == test_log
    # Check the API's internal state for the number of jobs
    assert len(gitlab_api.pending_jobs) == 3
    assert len(gitlab_api.running_jobs) == 2
    assert len(gitlab_api.completed_jobs) == 5
    # Only the patched job should carry a log; every other job is empty.
    for job in gitlab_api.running_jobs + gitlab_api.completed_jobs:
        if job == gitlab_api.running_jobs[1]:
            assert job.log == test_log
        else:
            assert job.log == ""
@gitlab_api.use(n_runners=2, n_pending=3, n_running=2, n_success=4, n_failed=1)
def test_auth_error(gitlab_api):
    """Each authentication failure mode (missing/wrong token, with or
    without a Content-Range header) must yield a 403 and leave every
    job's log untouched."""
    expected_job = gitlab_api.running_jobs[1]
    headers_to_try = {
        "No token": {"Content-Range": "0-" + str(len(test_log))},
        "Wrong token": {
            "JOB-TOKEN": "invalid_token",
            "Content-Range": "0-" + str(len(test_log)),
        },
        "No token or content range": {},
        "Wrong token without content range": {"JOB-TOKEN": "invalid_token"},
    }
    for name, headers in headers_to_try.items():
        response = requests.patch(
            API_ENDPOINT + "/jobs/" + expected_job.id + "/trace",
            test_log,
            headers=headers,
        )
        # Check the response; `name` is the assert message so a failure
        # identifies which header combination broke.
        assert response.status_code == 403, name
        assert response.json() == {"message": "403 Forbidden"}, name
    # Check the API's internal state
    assert len(gitlab_api.pending_jobs) == 3
    assert len(gitlab_api.running_jobs) == 2
    assert len(gitlab_api.completed_jobs) == 5
    for job in gitlab_api.running_jobs + gitlab_api.completed_jobs:
        assert job.log == ""
@gitlab_api.use(n_runners=2, n_pending=3, n_running=2, n_success=4, n_failed=1)
def test_range_error(gitlab_api):
    """Content-Range validation: a missing header is a 400, and any
    malformed or mismatched range is a 416; job logs must stay empty."""
    expected_job = gitlab_api.running_jobs[1]
    headers = {"JOB-TOKEN": expected_job.token}
    response = requests.patch(
        API_ENDPOINT + "/jobs/" + expected_job.id + "/trace", test_log, headers=headers
    )
    # Check the response: no Content-Range header at all -> 400
    assert response.status_code == 400
    assert response.json() == {"error": "Missing header Content-Range"}
    assert response.headers["Range"] == "0-0"
    # Numeric key prefixes give sorted() below a deterministic order.
    headers_to_try = {
        "1 Wrong start": {
            "JOB-TOKEN": expected_job.token,
            "Content-Range": "5-" + str(len(test_log)),
        },
        "2 Wrong length": {
            "JOB-TOKEN": expected_job.token,
            "Content-Range": "0-" + str(len(test_log) - 100),
        },
        "3 Badly formatted": {
            "JOB-TOKEN": expected_job.token,
            "Content-Range": "0-" + str(len(test_log)) + "-10",
        },
        "4 Not a number": {"JOB-TOKEN": expected_job.token, "Content-Range": "0-b"},
    }
    for name, headers in sorted(headers_to_try.items()):
        response = requests.patch(
            API_ENDPOINT + "/jobs/" + expected_job.id + "/trace",
            test_log,
            headers=headers,
        )
        # Check the response
        assert response.status_code == 416, name
        assert response.json() == {"error": "Range Not Satisfiable"}, name
        assert response.headers["Range"] == "0-0"
    # Check the API's internal state
    assert len(gitlab_api.pending_jobs) == 3
    assert len(gitlab_api.running_jobs) == 2
    assert len(gitlab_api.completed_jobs) == 5
    for job in gitlab_api.running_jobs + gitlab_api.completed_jobs:
        assert job.log == ""
@gitlab_api.use(n_runners=2, n_pending=3, n_running=2, n_success=4, n_failed=1)
def test_completed(gitlab_api):
    """Patching the trace of an already-completed job is rejected with a
    403 and the response carries no Range header."""
    expected_job = gitlab_api.completed_jobs[1]
    headers = {
        "JOB-TOKEN": expected_job.token,
        "Content-Range": "0-" + str(len(test_log)),
    }
    response = requests.patch(
        API_ENDPOINT + "/jobs/" + expected_job.id + "/trace", test_log, headers=headers
    )
    # Check the response
    assert response.status_code == 403, response.json()
    assert response.json() == {"message": "403 Forbidden - Job is not running"}
    assert "Range" not in response.headers
| 34.710059
| 87
| 0.625639
| 769
| 5,866
| 4.559168
| 0.127438
| 0.082145
| 0.059327
| 0.074159
| 0.801198
| 0.796349
| 0.741871
| 0.731603
| 0.687963
| 0.676269
| 0
| 0.03236
| 0.241391
| 5,866
| 168
| 88
| 34.916667
| 0.755506
| 0.06819
| 0
| 0.484615
| 0
| 0
| 0.120389
| 0
| 0
| 0
| 0
| 0
| 0.253846
| 1
| 0.030769
| false
| 0
| 0.038462
| 0
| 0.069231
| 0.007692
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bb64d11387a87abcfcb42189219263dfb2034cd4
| 146
|
py
|
Python
|
6KYU/break_camel_case.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 4
|
2021-07-17T22:48:03.000Z
|
2022-03-25T14:10:58.000Z
|
6KYU/break_camel_case.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | null | null | null |
6KYU/break_camel_case.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 3
|
2021-06-14T14:18:16.000Z
|
2022-03-16T06:02:02.000Z
|
def solution(s: str) -> str:
    """Break camelCase: insert a space before every uppercase character.

    The original `if all(i.islower() for i in s)` fast-path was redundant:
    when the string has no uppercase characters the join below already
    returns it unchanged, so it has been removed.
    """
    return ''.join(' ' + ch if ch.isupper() else ch for ch in s)
| 29.2
| 60
| 0.534247
| 27
| 146
| 2.888889
| 0.518519
| 0.102564
| 0.153846
| 0.179487
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.308219
| 146
| 5
| 61
| 29.2
| 0.772277
| 0
| 0
| 0
| 0
| 0
| 0.006803
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
bb7a06760c1fa4bcd5aa4659d0b6ddd330f60753
| 571
|
py
|
Python
|
src/app/views/api/deploy_key.py
|
grihabor/teser
|
93ecc60a6952313c6fbb10de3b1cc0647899e9e8
|
[
"BSD-3-Clause"
] | null | null | null |
src/app/views/api/deploy_key.py
|
grihabor/teser
|
93ecc60a6952313c6fbb10de3b1cc0647899e9e8
|
[
"BSD-3-Clause"
] | null | null | null |
src/app/views/api/deploy_key.py
|
grihabor/teser
|
93ecc60a6952313c6fbb10de3b1cc0647899e9e8
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
from flask import jsonify
from flask_security import login_required, current_user
from tasks import generate_deploy_key
logger = logging.getLogger(__name__)
def import_generate_deploy_key(app):
    """Register the GET /api/deploy_key/generate endpoint on *app*."""

    @app.route('/api/deploy_key/generate', methods=['GET'])
    @login_required
    def generate_deploy_key_endpoint():
        # Kick off the background task for the logged-in user, then block
        # until the worker hands back the generated key.
        task_result = generate_deploy_key.delay(current_user.id)
        logger.info('Waiting for deploy key to generate...')
        key = task_result.get()
        return jsonify({'deploy_key': key, 'result': 'ok'})
| 28.55
| 64
| 0.721541
| 74
| 571
| 5.256757
| 0.459459
| 0.208226
| 0.218509
| 0.118252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185639
| 571
| 20
| 65
| 28.55
| 0.836559
| 0
| 0
| 0
| 1
| 0
| 0.115385
| 0.041958
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.384615
| 0
| 0.615385
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bb815160c155e78c978ac1ec2c16ada8a87e4751
| 60
|
py
|
Python
|
python_solutions/array/list split.py
|
ersincebi/hackerrank
|
9475c8e88e9071544c10a939fe7307c8e62fe3a0
|
[
"MIT"
] | null | null | null |
python_solutions/array/list split.py
|
ersincebi/hackerrank
|
9475c8e88e9071544c10a939fe7307c8e62fe3a0
|
[
"MIT"
] | null | null | null |
python_solutions/array/list split.py
|
ersincebi/hackerrank
|
9475c8e88e9071544c10a939fe7307c8e62fe3a0
|
[
"MIT"
] | null | null | null |
# Rotate the list left by d positions: a[d:] + a[:d] moves the first d
# elements to the back. (The original's `n = len(a)` was unused and has
# been removed.)
a = [1, 2, 3, 4, 5]
d = 4
print(a[d:] + a[:d])
| 8.571429
| 20
| 0.366667
| 16
| 60
| 1.375
| 0.625
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0.3
| 60
| 7
| 20
| 8.571429
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bbba22b5f9ab87d05bf3760d53e0377822325f32
| 163
|
py
|
Python
|
myfamily/settings/__init__.py
|
fortyMiles/my-family
|
d827b7fa36753726318fcf9e55d0b482fdf8323d
|
[
"BSD-3-Clause"
] | null | null | null |
myfamily/settings/__init__.py
|
fortyMiles/my-family
|
d827b7fa36753726318fcf9e55d0b482fdf8323d
|
[
"BSD-3-Clause"
] | null | null | null |
myfamily/settings/__init__.py
|
fortyMiles/my-family
|
d827b7fa36753726318fcf9e55d0b482fdf8323d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .test import Test
from .local import Local # noqa
from .production import Production # noqa
| 23.285714
| 42
| 0.742331
| 22
| 163
| 5.272727
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007407
| 0.171779
| 163
| 6
| 43
| 27.166667
| 0.851852
| 0.190184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bbeb93940dde0cbabddef425d78f860830820ef6
| 17
|
py
|
Python
|
waveglow/__init__.py
|
kdorichev/text2speech
|
082ed9c222fa346f6c5ad6375477807df44ed45a
|
[
"Apache-2.0"
] | 3
|
2020-07-08T21:25:18.000Z
|
2021-10-04T05:44:38.000Z
|
fastpitch/__init__.py
|
kdorichev/text2speech
|
082ed9c222fa346f6c5ad6375477807df44ed45a
|
[
"Apache-2.0"
] | 2
|
2020-05-22T18:06:46.000Z
|
2020-07-06T08:22:40.000Z
|
waveglow/__init__.py
|
kdorichev/text2speech
|
082ed9c222fa346f6c5ad6375477807df44ed45a
|
[
"Apache-2.0"
] | null | null | null |
"""__init.py__"""
| 17
| 17
| 0.588235
| 2
| 17
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 17
| 1
| 17
| 17
| 0.352941
| 0.647059
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bbedf470eaa3b7748552779bfbd41c44714b1316
| 21,278
|
py
|
Python
|
code/python/StandardDatafeed/v1/fds/sdk/StandardDatafeed/api/schemas__bundles_api.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 6
|
2022-02-07T16:34:18.000Z
|
2022-03-30T08:04:57.000Z
|
code/python/StandardDatafeed/v1/fds/sdk/StandardDatafeed/api/schemas__bundles_api.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 2
|
2022-02-07T05:25:57.000Z
|
2022-03-07T14:18:04.000Z
|
code/python/StandardDatafeed/v1/fds/sdk/StandardDatafeed/api/schemas__bundles_api.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | null | null | null |
"""
SDF_API
The Standard Datafeed (SDF) API provides an alternative method for users to request and retrieve SDF packages (schemas & bundles). This service is not a direct replacement and does not have 100% feature parity with the Loader. This API provides an alternative for users who are unable to utilize the Loader due to: Unable to install 3rd party executables due to Corporate Security policies Unable to utilize the Loader due to limitations or restrictions with the environment used to consume Standard Datafeed Clients who are utilizing existing delivery method like FTP, who may want to use a more secured & modern solution This API allows users to retrieve SDF packages they have subscriptions for, going back to August 31, 2021. Additional parameters are available to filter requests to get the exact files users are looking for. # noqa: E501
The version of the OpenAPI document: 1.0
Contact: teammustang@factset.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from multiprocessing.pool import ApplyResult
import typing
from fds.sdk.StandardDatafeed.api_client import ApiClient, Endpoint as _Endpoint
from fds.sdk.StandardDatafeed.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from fds.sdk.StandardDatafeed.exceptions import ApiException
from fds.sdk.StandardDatafeed.model.list_files200_response import ListFiles200Response
from fds.sdk.StandardDatafeed.model.list_files400_response import ListFiles400Response
class SchemasBundlesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.get_v1_list_files_endpoint = _Endpoint(
settings={
'response_type': (
{ 200: (ListFiles200Response,), 400: (ListFiles400Response,), },
None
),
'auth': [
'FactSetApiKey',
'FactSetOAuth2'
],
'endpoint_path': '/v1/list-files',
'operation_id': 'get_v1_list_files',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'schema',
'bundle',
'type',
'start_date',
'end_date',
'pagination_limit',
'pagination_offset',
'sort',
],
'required': [],
'nullable': [
],
'enum': [
'type',
'sort',
],
'validation': [
'pagination_limit',
]
},
root_map={
'validations': {
('pagination_limit',): {
'inclusive_maximum': 500,
},
},
'allowed_values': {
('type',): {
"FULL": "full",
"DELTA": "delta"
},
('sort',): {
"STARTDATE": "startDate"
},
},
'openapi_types': {
'schema':
(str,),
'bundle':
(str,),
'type':
(str,),
'start_date':
(str,),
'end_date':
(str,),
'pagination_limit':
(int,),
'pagination_offset':
(int,),
'sort':
(str,),
},
'attribute_map': {
'schema': 'schema',
'bundle': 'bundle',
'type': 'type',
'start_date': 'startDate',
'end_date': 'endDate',
'pagination_limit': '_paginationLimit',
'pagination_offset': '_paginationOffset',
'sort': '_sort',
},
'location_map': {
'schema': 'query',
'bundle': 'query',
'type': 'query',
'start_date': 'query',
'end_date': 'query',
'pagination_limit': 'query',
'pagination_offset': 'query',
'sort': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
@staticmethod
def apply_kwargs_defaults(kwargs, return_http_data_only, async_req):
kwargs["async_req"] = async_req
kwargs["_return_http_data_only"] = return_http_data_only
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False)
kwargs["_content_type"] = kwargs.get("_content_type")
kwargs["_host_index"] = kwargs.get("_host_index")
def get_v1_list_files(
self,
**kwargs
) -> ListFiles200Response:
"""get_v1_list_files # noqa: E501
<a href=https://api.factset.com/bulk-documents/sdf/v1/list-files>List-Files</a> end point provides the delta & full files in a reverse chronological order for all the schemas & bundles subscribed by the client # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Keyword Args:
schema (str): schema name</p> Default is all schemas & bundles subscribed by the client</p> **Example: acta_v1, fgp_v1, yn_v1**. [optional]
bundle (str): bundle name</p> Default is all the bundles client is subscribed to</p> **Do not use \"schema\" parameter** when searching for a specific bundle. [optional]
type (str): file type = Full or Delta</p> Full files are weekly files generated every weekend (Saturday). **The date range should include weekend dates if requesting for \"Full\" files**. [optional]
start_date (str): The earliest date of the feed file the API should fetch for based on the fileTimestamp.</p> Data requested should be in batches of 10days **(Date range should be less than or equal to 10)**</p>**Date format - YYYY-MM-DD**. [optional]
end_date (str): The latest date of the feed file the API should fetch for based on the fileTimestamp</p>**Date format - YYYY-MM-DD**. [optional]
pagination_limit (int): Specifies the number of results to return per page. Default is 20 & Maximum is 500 results per page. [optional] if omitted the server will use the default value of 20
pagination_offset (int): Specifies the starting point for pagination. This parameter is used to identify the beginning of next set of results. [optional]
sort (str): Enables to get the data in ascending or descending order based on startTime. Results are in reverse chronological order if this parameter is not used. [optional] if omitted the server will use the default value of "startDate"
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ListFiles200Response
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
return self.get_v1_list_files_endpoint.call_with_http_info(**kwargs)
def get_v1_list_files_with_http_info(
    self,
    **kwargs
) -> typing.Tuple[ListFiles200Response, int, typing.MutableMapping]:
    """get_v1_list_files  # noqa: E501

    Synchronous call to the List-Files endpoint
    (https://api.factset.com/bulk-documents/sdf/v1/list-files), which returns
    the delta & full files in reverse chronological order for all schemas &
    bundles the client is subscribed to.  # noqa: E501

    Returns the response data together with the HTTP status and the headers.

    Keyword Args:
        schema (str): schema name, e.g. acta_v1, fgp_v1, yn_v1. [optional]
        bundle (str): bundle name; do not combine with ``schema``. [optional]
        type (str): file type, "Full" or "Delta". [optional]
        start_date (str): earliest file date (YYYY-MM-DD); range <= 10 days. [optional]
        end_date (str): latest file date (YYYY-MM-DD). [optional]
        pagination_limit (int): results per page; default 20, max 500. [optional]
        pagination_offset (int): starting offset for pagination. [optional]
        sort (str): sort order based on startTime; default "startDate". [optional]
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse
            without reading/decoding response data. Default is True.
        _request_timeout (int/float/tuple): total timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): type-check data sent to the server. Default True.
        _check_return_type (bool): type-check data received from the server. Default True.
        _spec_property_naming (bool): True if the input uses serialized (OpenAPI
            document) names, False for pythonic snake-case names (default).
        _content_type (str/None): force the request body content-type.
            Default is None (predicted from allowed content-types and body).
        _host_index (int/None): index of the server to use.
            Default is read from the configuration.

    Returns:
        tuple: (ListFiles200Response, HTTP status code, response-header dict)
    """
    # Synchronous variant that also surfaces the status code and headers.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    endpoint = self.get_v1_list_files_endpoint
    return endpoint.call_with_http_info(**kwargs)
def get_v1_list_files_async(
    self,
    **kwargs
) -> "ApplyResult[ListFiles200Response]":
    """get_v1_list_files  # noqa: E501

    Asynchronous call to the List-Files endpoint
    (https://api.factset.com/bulk-documents/sdf/v1/list-files), which returns
    the delta & full files in reverse chronological order for all schemas &
    bundles the client is subscribed to.  # noqa: E501

    The HTTP request runs on a worker; the returned ApplyResult's ``.get()``
    yields only the response data.

    Keyword Args:
        schema (str): schema name, e.g. acta_v1, fgp_v1, yn_v1. [optional]
        bundle (str): bundle name; do not combine with ``schema``. [optional]
        type (str): file type, "Full" or "Delta". [optional]
        start_date (str): earliest file date (YYYY-MM-DD); range <= 10 days. [optional]
        end_date (str): latest file date (YYYY-MM-DD). [optional]
        pagination_limit (int): results per page; default 20, max 500. [optional]
        pagination_offset (int): starting offset for pagination. [optional]
        sort (str): sort order based on startTime; default "startDate". [optional]
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse
            without reading/decoding response data. Default is True.
        _request_timeout (int/float/tuple): total timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): type-check data sent to the server. Default True.
        _check_return_type (bool): type-check data received from the server. Default True.
        _spec_property_naming (bool): True if the input uses serialized (OpenAPI
            document) names, False for pythonic snake-case names (default).
        _content_type (str/None): force the request body content-type.
            Default is None (predicted from allowed content-types and body).
        _host_index (int/None): index of the server to use.
            Default is read from the configuration.

    Returns:
        ApplyResult[ListFiles200Response]
    """
    # Async variant: data only, wrapped in an ApplyResult.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    endpoint = self.get_v1_list_files_endpoint
    return endpoint.call_with_http_info(**kwargs)
def get_v1_list_files_with_http_info_async(
    self,
    **kwargs
) -> "ApplyResult[typing.Tuple[ListFiles200Response, int, typing.MutableMapping]]":
    """get_v1_list_files  # noqa: E501

    Asynchronous call to the List-Files endpoint
    (https://api.factset.com/bulk-documents/sdf/v1/list-files), which returns
    the delta & full files in reverse chronological order for all schemas &
    bundles the client is subscribed to.  # noqa: E501

    The HTTP request runs on a worker; the returned ApplyResult's ``.get()``
    yields the response data, the HTTP status and the response headers.

    Keyword Args:
        schema (str): schema name, e.g. acta_v1, fgp_v1, yn_v1. [optional]
        bundle (str): bundle name; do not combine with ``schema``. [optional]
        type (str): file type, "Full" or "Delta". [optional]
        start_date (str): earliest file date (YYYY-MM-DD); range <= 10 days. [optional]
        end_date (str): latest file date (YYYY-MM-DD). [optional]
        pagination_limit (int): results per page; default 20, max 500. [optional]
        pagination_offset (int): starting offset for pagination. [optional]
        sort (str): sort order based on startTime; default "startDate". [optional]
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse
            without reading/decoding response data. Default is True.
        _request_timeout (int/float/tuple): total timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): type-check data sent to the server. Default True.
        _check_return_type (bool): type-check data received from the server. Default True.
        _spec_property_naming (bool): True if the input uses serialized (OpenAPI
            document) names, False for pythonic snake-case names (default).
        _content_type (str/None): force the request body content-type.
            Default is None (predicted from allowed content-types and body).
        _host_index (int/None): index of the server to use.
            Default is read from the configuration.

    Returns:
        ApplyResult[(ListFiles200Response, int, typing.Dict)]
    """
    # Async variant: full (data, status, headers) triple, wrapped in an ApplyResult.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    endpoint = self.get_v1_list_files_endpoint
    return endpoint.call_with_http_info(**kwargs)
| 58.941828
| 850
| 0.618056
| 2,640
| 21,278
| 4.867803
| 0.127273
| 0.025212
| 0.016263
| 0.015252
| 0.786787
| 0.779317
| 0.770135
| 0.765621
| 0.765621
| 0.761264
| 0
| 0.012163
| 0.308347
| 21,278
| 360
| 851
| 59.105556
| 0.861045
| 0.653727
| 0
| 0.196203
| 0
| 0
| 0.186704
| 0.030924
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037975
| false
| 0
| 0.056962
| 0
| 0.126582
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bbf41802e792b5cf36867395ab88a968b19c94ed
| 2,178
|
py
|
Python
|
Linear-discriminant-function/batch_perception.py
|
LingyvKong/PatternRecognition-UCAS
|
6b7f1a357957c8560f3dc6692b0749021e470c7c
|
[
"MIT"
] | 2
|
2021-09-17T08:42:55.000Z
|
2021-10-31T09:28:49.000Z
|
Linear-discriminant-function/batch_perception.py
|
LingyvKong/PatternRecognition-UCAS
|
6b7f1a357957c8560f3dc6692b0749021e470c7c
|
[
"MIT"
] | null | null | null |
Linear-discriminant-function/batch_perception.py
|
LingyvKong/PatternRecognition-UCAS
|
6b7f1a357957c8560f3dc6692b0749021e470c7c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Batch perceptron training on two classes of 2-D samples.

Created on Wed Nov 11 17:30:44 2020

@author: 92031
"""
import numpy as np
import matplotlib.pyplot as plt

# Data file: whitespace-separated rows of (x1, x2, extra); rows 0-9 are class
# omega_1 and rows 10-19 are class omega_2.
file = "E:\\UserData\\Agkd\\Course\\PatternRecognition\\data.txt"
MaxIter = 1000           # safety cap on training iterations
a = np.array([0, 0, 0])  # initial augmented weight vector (w1, w2, bias)
yita = 1                 # learning rate (eta)

# 1.1 -- omega_1 vs omega_2
with open(file) as f:
    lines = f.readlines()
lines = [line.strip().split(' ') for line in lines]
y1 = np.array(lines[:10], dtype=float)
y2 = np.array(lines[10:20], dtype=float)
y2[:, 2] = 1   # set the homogeneous coordinate before negating
y2 = -y2       # negate class-2 samples so "a . y > 0" is the single criterion

id1 = np.matmul(a, y1.T)
id2 = np.matmul(a, y2.T)
i = 0
# Batch perceptron update: add the sum of all misclassified samples each step.
# FIX: use `not` instead of `~` -- bitwise-not on a plain Python bool yields
# -2/-1 (always truthy) and silently breaks the loop condition.
while not (np.all(id1 > 0) and np.all(id2 > 0)) and i < MaxIter:
    i = i + 1
    a = a + yita * (np.sum(y1[id1 <= 0], axis=0) + np.sum(y2[id2 <= 0], axis=0))
    id1 = np.matmul(a, y1.T)
    id2 = np.matmul(a, y2.T)
print(a)

fig, axes = plt.subplots()
n = axes.scatter(x=y1[:, 0], y=y1[:, 1], s=20, c='r', marker='^')
m = axes.scatter(x=-y2[:, 0], y=-y2[:, 1], s=20, c='g', marker='*')
# FIX: raw strings for TeX labels -- '\o' is an invalid escape sequence and
# raises a DeprecationWarning (SyntaxError in future Python) in plain literals.
plt.legend((n, m), (r'$\omega_1$', r'$\omega_2$'))
xx = np.arange(-10, 10, 1)
# Decision boundary: a0*x + a1*y + a2 = 0  =>  y = -(a0/a1)*x - a2/a1
b = -a[0] / a[1] * xx - a[2] / a[1]
axes.plot(xx, b)
axes.set_title(r'Batch perception in $\omega_1$ and $\omega_2$')
axes.set_xlabel('x1')
axes.set_ylabel('x2')

# # 1.2 -- omega_3 vs omega_2 (same procedure on rows 20-29)
# with open(file) as f:
#     lines = f.readlines()
# lines = [line.strip().split(' ') for line in lines]
# y3 = np.array(lines[20:30], dtype=float)
# y2 = np.array(lines[10:20], dtype=float)
# y2[:, 2] = 1
# y3[:, 2] = 1
# y2 = -y2
# id3 = np.matmul(a, y3.T)
# id2 = np.matmul(a, y2.T)
# i = 0
# while not (np.all(id3 > 0) and np.all(id2 > 0)) and i < MaxIter:
#     i = i + 1
#     a = a + yita * (np.sum(y3[id3 <= 0], axis=0) + np.sum(y2[id2 <= 0], axis=0))
#     id3 = np.matmul(a, y3.T)
#     id2 = np.matmul(a, y2.T)
# print(a)
# fig, axes = plt.subplots()
# n = axes.scatter(x=y3[:, 0], y=y3[:, 1], s=20, c='r', marker='^')
# m = axes.scatter(x=-y2[:, 0], y=-y2[:, 1], s=20, c='g', marker='*')
# plt.legend((n, m), (r'$\omega_3$', r'$\omega_2$'))
# xx = np.arange(-10, 10, 1)
# b = -a[0] / a[1] * xx - a[2] / a[1]
# axes.plot(xx, b)
# axes.set_title(r'Batch perception in $\omega_2$ and $\omega_3$')
# axes.set_xlabel('x1')
# axes.set_ylabel('x2')
| 27.923077
| 77
| 0.53214
| 401
| 2,178
| 2.855362
| 0.234414
| 0.055895
| 0.062882
| 0.041921
| 0.768559
| 0.768559
| 0.768559
| 0.768559
| 0.716157
| 0.716157
| 0
| 0.093037
| 0.215335
| 2,178
| 77
| 78
| 28.285714
| 0.576946
| 0.447658
| 0
| 0.125
| 0
| 0
| 0.119595
| 0.051518
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bbf882b5993d47cc3f6b5613da6a35b8521b0676
| 18
|
py
|
Python
|
iThenticate/__init__.py
|
pavanarya/ithenticate-api-python
|
ce4fe086721bca5dc8e7a42a7d6b1e266e4a7d11
|
[
"BSD-2-Clause"
] | 3
|
2018-11-18T20:03:45.000Z
|
2021-07-18T05:24:39.000Z
|
iThenticate/__init__.py
|
pavanarya/ithenticate-api-python
|
ce4fe086721bca5dc8e7a42a7d6b1e266e4a7d11
|
[
"BSD-2-Clause"
] | 1
|
2018-04-20T13:36:50.000Z
|
2018-04-23T08:06:09.000Z
|
iThenticate/__init__.py
|
pavanarya/ithenticate-api-python
|
ce4fe086721bca5dc8e7a42a7d6b1e266e4a7d11
|
[
"BSD-2-Clause"
] | 1
|
2021-06-09T05:55:36.000Z
|
2021-06-09T05:55:36.000Z
|
from . import API
| 9
| 17
| 0.722222
| 3
| 18
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 18
| 1
| 18
| 18
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a5a43db0a4a85b1dc9dab3e4a6dadf5847d90d83
| 37
|
py
|
Python
|
eod/plugins/classification/models/postprocess/__init__.py
|
Helicopt/EOD
|
b5db36f4ce267bf64d093b8174bde2c4097b4718
|
[
"Apache-2.0"
] | 196
|
2021-10-30T05:15:36.000Z
|
2022-03-30T18:43:40.000Z
|
eod/tasks/cls/models/postprocess/__init__.py
|
YZW-explorer/EOD
|
f10e64de86c0f356ebf5c7e923f4042eec4207b1
|
[
"Apache-2.0"
] | 12
|
2021-10-30T11:33:28.000Z
|
2022-03-31T14:22:58.000Z
|
eod/tasks/cls/models/postprocess/__init__.py
|
YZW-explorer/EOD
|
f10e64de86c0f356ebf5c7e923f4042eec4207b1
|
[
"Apache-2.0"
] | 23
|
2021-11-01T07:26:17.000Z
|
2022-03-27T05:55:37.000Z
|
from .cls_postprocess import * # noqa
| 37
| 37
| 0.783784
| 5
| 37
| 5.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 37
| 1
| 37
| 37
| 0.875
| 0.108108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
3c4c9b2ec1032ff644fe97f1fbe925756a0167c4
| 44
|
py
|
Python
|
ctf/mason/tempCodeRunnerFile.py
|
zmewshaw/cyber
|
fd406b619a966eec87234d4420de5760c9d4c8de
|
[
"MIT"
] | null | null | null |
ctf/mason/tempCodeRunnerFile.py
|
zmewshaw/cyber
|
fd406b619a966eec87234d4420de5760c9d4c8de
|
[
"MIT"
] | null | null | null |
ctf/mason/tempCodeRunnerFile.py
|
zmewshaw/cyber
|
fd406b619a966eec87234d4420de5760c9d4c8de
|
[
"MIT"
] | null | null | null |
ut).__contains__("123")):
# break
| 22
| 25
| 0.5
| 4
| 44
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 0.295455
| 44
| 2
| 26
| 22
| 0.483871
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3c8a38baecea15886b038bf51a8fab05b7bc0476
| 3,763
|
py
|
Python
|
testing/reddit_transform_issue/problem.py
|
pj0620/acca-video-series
|
1b09548014cc899ded5a8fdd1293f7fc121a98bc
|
[
"MIT"
] | null | null | null |
testing/reddit_transform_issue/problem.py
|
pj0620/acca-video-series
|
1b09548014cc899ded5a8fdd1293f7fc121a98bc
|
[
"MIT"
] | 3
|
2020-04-16T09:24:48.000Z
|
2021-03-27T19:27:48.000Z
|
testing/reddit_transform_issue/problem.py
|
pj0620/acca-video-series
|
1b09548014cc899ded5a8fdd1293f7fc121a98bc
|
[
"MIT"
] | 1
|
2020-09-01T05:32:04.000Z
|
2020-09-01T05:32:04.000Z
|
from manimlib.imports import *
class Problem(Scene):
    """Manim scene introducing the rational function R(x) = f(x)/g(x)."""

    def construct(self):
        # R(x) = f(x)/g(x), each sub-expression colored individually.
        equation_R = TexMobject("R(x)", " = ", "{f(x)", "\\over", "g(x)}")
        equation_R.set_color_by_tex("R(x)", PURPLE)
        equation_R.set_color_by_tex("f(x)", RED)
        equation_R.set_color_by_tex("g(x)", BLUE)
        # Concrete numerator/denominator, stacked above and below center.
        equation_f = TexMobject("f(x)", "=", "x^3 + 4x^2 - 11x - 30")
        equation_f.set_color_by_tex("f(x)", RED)
        equation_f.shift(0.5 * UP)
        equation_g = TexMobject("g(x)", "=", "x^3 - 4x^2 + 5x - 2")
        equation_g.set_color_by_tex("g(x)", BLUE)
        equation_g.shift(0.5 * DOWN)
        self.play(Write(equation_R))
        self.wait(3)
        # Move R(x) out of the way toward the upper left.
        # NOTE(review): the Transform onto itself looks redundant -- presumably
        # a workaround for a transform issue; confirm before removing.
        self.play(Transform(equation_R, target_mobject=equation_R),
                  ApplyMethod(equation_R.move_to, equation_R.get_center() + 3 * UP + 5 * LEFT))
        self.wait(3)
        self.play(Write(equation_f))
        self.wait()
        self.play(Write(equation_g))
        self.wait()
        # Shrink both definitions, park them on the left, then morph the
        # abstract numerator "f(x)" into the concrete polynomial.
        grouped_equation = VGroup(equation_f, equation_g)
        self.play(ApplyMethod(grouped_equation.scale, 0.65))
        self.play(ApplyMethod(grouped_equation.move_to, grouped_equation.get_center() + 4 * LEFT))
        self.play(ReplacementTransform(equation_R[2], equation_f[2]))
        self.wait()
class Solution(Scene):
    """Manim scene: stretch the fraction bar of R(x) = f(x)/g(x) and drop the
    concrete polynomials into the numerator and denominator."""

    def construct(self):
        equation_R = TexMobject("R(x)", " = ", "{f(x)", "\\over", "g(x)}")
        equation_R.set_color_by_tex("R(x)", PURPLE)
        equation_R.set_color_by_tex("f(x)", RED)
        equation_R.set_color_by_tex("g(x)", BLUE)
        equation_f = TexMobject("f(x)", "=", "x^3 + 4x^2 - 11x - 30")
        equation_f.set_color_by_tex("f(x)", RED)
        equation_f.shift(0.5 * UP)
        equation_g = TexMobject("g(x)", "=", "x^3 - 4x^2 + 5x - 2")
        equation_g.set_color_by_tex("g(x)", BLUE)
        equation_g.shift(0.5 * DOWN)
        self.play(Write(equation_R))
        self.wait(3)
        # Move R(x) toward the upper left.
        self.play(
            ApplyMethod(
                equation_R.move_to, equation_R.get_center() + 3 * UP + 5 * LEFT
            )
        )
        self.wait(3)
        self.play(Write(equation_f))
        self.wait()
        self.play(Write(equation_g))
        self.wait()
        grouped_equation = VGroup(equation_f, equation_g)
        # self.play(ApplyMethod(grouped_equation.scale, 0.65))
        self.play(ApplyMethod(grouped_equation.move_to, grouped_equation.get_center() + 4 * LEFT))
        # Horizontally stretch the fraction bar (equation_R[3]) about its left
        # edge by scale_factor; y and z are left untouched by the matrix.
        scale_factor = 5
        left_point = equation_R[3].get_left()
        transform_matrix = np.array(
            [[scale_factor, 0, 0],
             [0, 1, 0],
             [0, 0, 1]]
        )
        def fun(p):
            # Affine map: stretch x relative to the bar's left endpoint.
            return left_point + transform_matrix.dot(p-left_point)
        # x-coordinate of the stretched bar's midpoint; used to re-center the
        # numerator (equation_R[2]) and denominator (equation_R[4]).
        new_mid_x = equation_R[3].get_width() * scale_factor * 0.5 + equation_R[3].get_left()[0]
        self.play(
            ApplyPointwiseFunction(
                fun, equation_R[3],
                rate_func=linear,
                run_time=0.5
            ),
            ApplyMethod(
                equation_R[2].set_x, new_mid_x,
                rate_func=linear,
                run_time=0.5
            ),
            ApplyMethod(
                equation_R[4].set_x, new_mid_x,
                rate_func=linear,
                run_time=0.5
            )
        )
        # Replace f(x) in the numerator with a copy of the concrete polynomial,
        # fading out the placeholder.
        self.play(
            ApplyMethod(
                equation_f[2].copy().move_to, equation_R[2].get_center()
            ),
            FadeOut(
                equation_R[2]
            )
        )
        # Same for g(x) in the denominator.
        self.play(
            ApplyMethod(
                equation_g[2].copy().move_to, equation_R[4].get_center()
            ),
            FadeOut(
                equation_R[4]
            )
        )
        self.wait()
| 31.621849
| 98
| 0.528036
| 486
| 3,763
| 3.829218
| 0.154321
| 0.130575
| 0.053735
| 0.069855
| 0.779688
| 0.734551
| 0.713058
| 0.713058
| 0.713058
| 0.713058
| 0
| 0.031771
| 0.330853
| 3,763
| 119
| 99
| 31.621849
| 0.707307
| 0.013819
| 0
| 0.649485
| 0
| 0
| 0.050135
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030928
| false
| 0
| 0.010309
| 0.010309
| 0.072165
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
3c9e87365e75576a875a6f77daf45c74ea3884f1
| 90
|
py
|
Python
|
backend/src/authentication/__init__.py
|
spiritutumduo/spiritumDuo
|
987785906cd504f46ccebe3bbfe0e81cbf02bf7c
|
[
"MIT"
] | 1
|
2022-03-11T14:07:16.000Z
|
2022-03-11T14:07:16.000Z
|
backend/src/authentication/__init__.py
|
spiritutumduo/spiritumDuo
|
987785906cd504f46ccebe3bbfe0e81cbf02bf7c
|
[
"MIT"
] | 3
|
2022-02-25T22:46:46.000Z
|
2022-03-30T08:19:41.000Z
|
backend/src/authentication/__init__.py
|
spiritutumduo/spiritumDuo
|
987785906cd504f46ccebe3bbfe0e81cbf02bf7c
|
[
"MIT"
] | 1
|
2022-03-31T14:35:51.000Z
|
2022-03-31T14:35:51.000Z
|
from .authentication import SDAuthentication
from .logincontroller import LoginController
| 30
| 44
| 0.888889
| 8
| 90
| 10
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 90
| 2
| 45
| 45
| 0.97561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b1c5b520d5869100485805e8178f0aea1e934592
| 15,982
|
py
|
Python
|
detect.py
|
QFaceblue/Driving-Behavior-Recognition
|
98c8fab51c7074852598ea9119f472ed7b1bda13
|
[
"Apache-2.0"
] | 1
|
2022-03-13T14:37:17.000Z
|
2022-03-13T14:37:17.000Z
|
detect.py
|
QFaceblue/Driving-Behavior-Recognition
|
98c8fab51c7074852598ea9119f472ed7b1bda13
|
[
"Apache-2.0"
] | null | null | null |
detect.py
|
QFaceblue/Driving-Behavior-Recognition
|
98c8fab51c7074852598ea9119f472ed7b1bda13
|
[
"Apache-2.0"
] | null | null | null |
import torch
from efficientnet_pytorch import EfficientNet
# import onnx # 环境问题
from torchvision import datasets, models, transforms
import json
from PIL import Image
import cv2
import numpy as np
from timeit import default_timer as timer
import time
import onnxruntime
from ghost_net import ghost_net
# mydataset: one class label per line of the classes file.
classes_path = r"data/drive_classes.txt"
with open(classes_path) as f:
    label_name = [c.strip() for c in f.readlines()]
num_classes = len(label_name)
# # efficientnet b0
# model = EfficientNet.from_name('efficientnet-b0',num_classes=num_classes)
# path = r"checkpoint/B0/444/B0_acc=84.8921.pth"
## model: resnet18 dataset: mydataset
# model = models.resnet18(pretrained=False,num_classes=num_classes)
# # load the model parameters
# path = r"checkpoint/resnet18/000/B0_acc=84.8921.pth"
#
# # # mobilenetv2 dataset mydataset
# # model = models.mobilenet_v2(pretrained=False,num_classes=num_classes)
# # # load the model parameters
# # path = r"checkpoint/mobilenetv2/000/mv2_acc=82.7338.pth"
# Active PyTorch model: GhostNet at width multiplier 0.5, CPU inference.
path = r"checkpoint/ghost_net/333/ghostnet_05_kg_acc=68.3453.pth"
model = ghost_net(num_classes=num_classes, width_mult=0.5)
checkpoint = torch.load(path)
model.load_state_dict(checkpoint["net"])
print("loaded model with acc:{}".format(checkpoint["acc"]))
model.eval()
# device = 'cuda' if torch.cuda.is_available() else 'cpu'
device ='cpu'
model.to(device)
# onnx -- NOTE: `path` is reassigned below; the last assignment wins and is
# also the one later used for the onnxruntime session.
# path = r"checkpoint/resnet18/000/B0_acc=84.8921_2.onnx"
path = r"checkpoint/resnet18/111/resnet18_kg_acc=99.3310.onnx"
path = r"checkpoint/data_11_16/mobilenetv2/pre/0/111/mobilenetv2_1_my_224.onnx"
net = cv2.dnn.readNetFromONNX(path)
# xml_path = r"checkpoint/resnet18/000/B0_acc=84.8921.xml"
# bin_path = r"checkpoint/resnet18/000/B0_acc=84.8921.bin"
# xml_path = r"checkpoint/resnet18/111/resnet18_kg_acc=99.3310.xml"
# bin_path = r"checkpoint/resnet18/111/resnet18_kg_acc=99.3310.bin"
xml_path = r"checkpoint/data_11_16/mobilenetv2/pre/0/111/mobilenetv2_1_my_224.xml"
bin_path = r"checkpoint/data_11_16/mobilenetv2/pre/0/111/mobilenetv2_1_my_224.bin"
# net = cv2.dnn.readNet(xml_path, bin_path)
# net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
# net = cv2.dnn.readNetFromModelOptimizer(xml_path,bin_path)
# net.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)
# net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
# NOTE: `net` is re-created here from the OpenVINO IR files, replacing the
# ONNX-loaded network created above.
net = cv2.dnn.readNetFromModelOptimizer(xml_path,bin_path)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
onnx_session = onnxruntime.InferenceSession(path,None)
# # use the OpenVINO backend
# net.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)
# net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
# # openvino
# xml_path = r"checkpoint/resnet18/000/B0_acc=84.8921.xml"
# bin_path = r"checkpoint/resnet18/000/B0_acc=84.8921.bin"
# net_openvino = cv2.dnn.readNetFromModelOptimizer(xml_path,bin_path)
# # use the OpenVINO backend
# net_openvino.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)
# net_openvino.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
# Preprocessing pipeline for PIL images fed to the PyTorch model.
data_transform = transforms.Compose([
    # transforms.RandomResizedCrop((500,500)),
    # transforms.RandomHorizontalFlip(),
    # ToTensor() rescales pixel values from 0-255 down to 0-1;
    # transforms.Normalize() then maps 0-1 to roughly (-1, 1); per channel:
    # image = (image - mean) / std
    transforms.Resize((224,224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])
def detect_image_path(img_path, draw=True):
    """Classify the image file at *img_path* with the global PyTorch model.

    Returns (class_index, class_name). When *draw* is True, also shows the
    image annotated with the predicted label until a key is pressed.
    """
    bgr = cv2.imread(img_path)
    resized = cv2.resize(bgr, (224, 224))
    # Normalize with the same ImageNet mean/std used at training time.
    pixels = np.float32(resized) / 255.0
    pixels[:, :, ] -= (np.float32(0.485), np.float32(0.456), np.float32(0.406))
    pixels[:, :, ] /= (np.float32(0.229), np.float32(0.224), np.float32(0.225))
    # HWC -> CHW, then add the batch dimension the model expects.
    chw = pixels.transpose((2, 0, 1))
    batch = torch.from_numpy(chw).unsqueeze(0).to(device)
    scores = model.forward(batch)
    pred_index = int(torch.argmax(scores, 1).cpu().detach().numpy())
    if draw:
        cv2.putText(bgr, label_name[pred_index], (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
        cv2.imshow("input", bgr)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return pred_index, label_name[pred_index]
def detect_image(image):
    """Classify a PIL image with the global PyTorch model.

    Returns (class_index, class_name).
    """
    batch = data_transform(image).unsqueeze(0).to(device)
    logits = model.forward(batch)
    pred_index = int(torch.argmax(logits, 1).cpu().detach().numpy())
    return pred_index, label_name[pred_index]
# total time:1175.555784702301
def detect_video(video_path, output_path=""):
    """Run the PyTorch model frame-by-frame over *video_path*.

    Shows an annotated preview window; when *output_path* is non-empty the
    annotated frames are also written to it. Press 'q' to stop early.
    """
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    out = None
    if isOutput:
        print("!!! TYPE:", type(output_path), type(
            video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        if not return_value:
            print("Can't receive frame (stream end?). Exiting ...")
            break
        pre_t = timer()
        # OpenCV frames are BGR np.array; convert to an RGB PIL.Image via
        # PIL.Image.fromarray() for the model.
        frame2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(frame2)
        # FIX: bind to a local name -- the original rebound `label_name`,
        # shadowing the global class-name list inside this function.
        index, label = detect_image(image)
        curr_t = timer()
        infer_t = curr_t - pre_t
        print("inferrence time:{}".format(infer_t))
        # Rolling FPS counter: count frames per accumulated second.
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        text = "label:{} {}".format(label, fps)
        cv2.putText(frame, text=text, org=(150, 150), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1, color=(0, 0, 255), thickness=3)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", frame)
        if isOutput:
            # FIX: the original wrote `result` (the raw RGB array, no
            # annotation, wrong channel order for VideoWriter which expects
            # BGR); write the annotated BGR frame, matching the other
            # detect_video_* functions.
            out.write(frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # FIX: release the capture/writer and close windows (previously leaked).
    vid.release()
    if out is not None:
        out.release()
    cv2.destroyAllWindows()
# total time:476.4733600616455
def detect_video_dnn(video_path, output_path=""):
    """Classify *video_path* frame-by-frame through the cv2.dnn network `net`.

    Shows an annotated preview window; when *output_path* is non-empty the
    annotated frames are also written to it. Press 'q' to stop early.
    """
    vid = cv2.VideoCapture(video_path)
    # print("buffersize:",vid.get(cv2.CAP_PROP_BUFFERSIZE))
    # # set the capture buffer size
    # vid.set(cv2.CAP_PROP_BUFFERSIZE, 2)
    # print("buffersize:", vid.get(cv2.CAP_PROP_BUFFERSIZE))
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    print("FourCC:{} fps:{} size:{} output:{}".format(video_FourCC,video_fps,video_size,isOutput))
    out = None
    if isOutput:
        print("!!! TYPE:", type(output_path), type(
            video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        if not return_value:
            print("Can't receive frame (stream end?). Exiting ...")
            break
        frame2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = cv2.resize(frame2, (224, 224))
        image = np.float32(image) / 255.0
        image[:, :, ] -= (np.float32(0.485), np.float32(0.456), np.float32(0.406))
        image[:, :, ] /= (np.float32(0.229), np.float32(0.224), np.float32(0.225))
        pre_t = timer()
        blob = cv2.dnn.blobFromImage(image, 1.0, (224, 224), (0, 0, 0), False)
        net.setInput(blob)
        probs = net.forward()
        index = np.argmax(probs)
        # BUG FIX: removed the leftover debug line `index = 0` that overwrote
        # the prediction, so every frame was labelled with class 0.
        curr_t = timer()
        infer_t = curr_t - pre_t
        print("inferrence time:{}".format(infer_t))
        # print('class_id: {} class_name: {}'.format(index,label_name[index]))
        # Rolling FPS counter: count frames per accumulated second.
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        text = "label:{} {}".format(label_name[index], fps)
        # print(text)
        cv2.putText(frame, text=text, org=(150, 150), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=2, color=(0, 0, 255), thickness=3)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", frame)
        if isOutput:
            out.write(frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # FIX: release the capture/writer and close windows (previously leaked).
    vid.release()
    if out is not None:
        out.release()
    cv2.destroyAllWindows()
# total time:476.4733600616455
def detect_video_onnx(video_path, output_path=""):
    """Classify *video_path* frame-by-frame through the global onnxruntime
    session `onnx_session`.

    Shows an annotated preview window; when *output_path* is non-empty the
    annotated frames are also written to it. Press 'q' to stop early.
    """
    vid = cv2.VideoCapture(video_path)
    # print("buffersize:",vid.get(cv2.CAP_PROP_BUFFERSIZE))
    # # set the capture buffer size
    # vid.set(cv2.CAP_PROP_BUFFERSIZE, 2)
    # print("buffersize:", vid.get(cv2.CAP_PROP_BUFFERSIZE))
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    print("FourCC:{} fps:{} size:{} output:{}".format(video_FourCC,video_fps,video_size,isOutput))
    out = None
    if isOutput:
        print("!!! TYPE:", type(output_path), type(
            video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        if not return_value:
            print("Can't receive frame (stream end?). Exiting ...")
            break
        frame2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = cv2.resize(frame2, (224, 224))
        image = np.float32(image) / 255.0
        image[:, :, ] -= (np.float32(0.485), np.float32(0.456), np.float32(0.406))
        image[:, :, ] /= (np.float32(0.229), np.float32(0.224), np.float32(0.225))
        # NHWC -> NCHW layout expected by the exported ONNX model.
        image = image.reshape(1, 3, 224, 224)
        pre_t = timer()
        inputs = {onnx_session.get_inputs()[0].name: image}
        probs = onnx_session.run(None, inputs)
        index = np.argmax(probs)
        curr_t = timer()
        infer_t = curr_t - pre_t
        print("inferrence time:{}".format(infer_t))
        # print('class_id: {} class_name: {}'.format(index,label_name[index]))
        # Rolling FPS counter: count frames per accumulated second.
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        text = "label:{} {}".format(label_name[index], fps)
        # print(text)
        cv2.putText(frame, text=text, org=(150, 150), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1, color=(0, 0, 255), thickness=3)
        # cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", frame)
        if isOutput:
            out.write(frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # FIX: release the capture/writer and close windows (previously leaked).
    vid.release()
    if out is not None:
        out.release()
    cv2.destroyAllWindows()
# 初始化openvino,必须在指定目录运行
# C:
# cd C:\Program Files (x86)\IntelSWTools\openvino_2020.4.287\bin
# setupvars.bat
# d:
# total time:414.77942538261414
# def detect_video_openvino(video_path,output_path=""):
#
# vid = cv2.VideoCapture(video_path)
# if not vid.isOpened():
# raise IOError("Couldn't open webcam or video")
# video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
# video_fps = vid.get(cv2.CAP_PROP_FPS)
# video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
# int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
# isOutput = True if output_path != "" else False
# if isOutput:
# print("!!! TYPE:", type(output_path), type(
# video_FourCC), type(video_fps), type(video_size))
# out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
# accum_time = 0
# curr_fps = 0
# fps = "FPS: ??"
# prev_time = timer()
# while True:
# return_value, frame = vid.read()
# if not return_value:
# print("Can't receive frame (stream end?). Exiting ...")
# break
# frame2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# image = cv2.resize(frame2, (224, 224))
# image = np.float32(image) / 255.0
# image[:, :, ] -= (np.float32(0.485), np.float32(0.456), np.float32(0.406))
# image[:, :, ] /= (np.float32(0.229), np.float32(0.224), np.float32(0.225))
# blob = cv2.dnn.blobFromImage(image, 1.0, (224, 224), (0, 0, 0), False)
# net_openvino.setInput(blob)
# probs = net_openvino.forward()
# index = np.argmax(probs)
# # print('class_id: {} class_name: {}'.format(index,label_name[index]))
# curr_time = timer()
# exec_time = curr_time - prev_time
# prev_time = curr_time
# accum_time = accum_time + exec_time
# curr_fps = curr_fps + 1
# if accum_time > 1:
# accum_time = accum_time - 1
# fps = "FPS: " + str(curr_fps)
# curr_fps = 0
# text ="label:{} {}".format(label_name[index],fps)
# cv2.putText(frame, text=text, org=(150, 150), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
# fontScale=3, color=(0, 0, 255), thickness=3)
# # cv2.putText(result, text, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
# cv2.namedWindow("result", cv2.WINDOW_NORMAL)
# cv2.imshow("result", frame)
# if isOutput:
# out.write(frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
def get_cap():
    """Print the first 20 VideoCapture properties of a test video.

    Debug helper: opens the hard-coded video file and dumps the
    (property-index, value) pairs for cv2.CAP_PROP_* ids 0-19 to stdout.
    """
    video_path = r"E:\zdk\videos\dataset1\raw\26.1.mp4"
    # video_path = r"rtmp://58.200.131.2:1935/livetv/hunantv"
    cap = cv2.VideoCapture(video_path)  # dropped the redundant `vid` alias
    try:
        for prop_id in range(20):
            print(prop_id, cap.get(prop_id))
    finally:
        # Release the file/device handle; the original leaked it.
        cap.release()
if __name__ == '__main__':
    # img_path = r"data/imgs/drink.jpg"
    # i,l = detect_image_path(img_path,False)
    # print('class_id: {} class_name: {}'.format(i,l))

    # Alternative input sources, kept for quick switching while testing.
    # The rtmp URL below was a dead assignment (immediately overwritten),
    # so it is commented out like the other alternatives.
    # video_path = r"./data/video/26.1.mp4"
    # video_path = r"rtmp://58.200.131.2:1935/livetv/hunantv"
    # video_path = r"./data/video/output.avi"
    # video_path = "rtmp://202.115.17.6:8002/live/test3"
    # video_path = "rtsp://admin:admin@202.115.17.6:554/h265/ch1/main/av_stream"
    # video_path = "rtmp://202.115.17.6:8002/live/test2"
    video_path = r"./data/video/drive.avi"

    # Time the whole detection run end to end.
    start = time.time()
    # detect_video(video_path)
    detect_video_dnn(video_path)
    # detect_video_onnx(video_path)
    # detect_video_openvino(video_path)
    total = time.time() - start
    print("total time:{}".format(total))
    # get_cap()
| 40.563452
| 105
| 0.636716
| 2,220
| 15,982
| 4.384234
| 0.140541
| 0.025891
| 0.024658
| 0.024658
| 0.761944
| 0.72958
| 0.722388
| 0.710983
| 0.70112
| 0.684681
| 0
| 0.071131
| 0.217995
| 15,982
| 394
| 106
| 40.563452
| 0.707633
| 0.367601
| 0
| 0.647826
| 0
| 0
| 0.098171
| 0.043207
| 0
| 0
| 0.001206
| 0
| 0
| 1
| 0.026087
| false
| 0
| 0.047826
| 0
| 0.082609
| 0.06087
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b1ce2cdb17ac198f0c53ee2a595b3e93da3375c0
| 102
|
py
|
Python
|
simple_messages.py
|
Steven-matos/python_projects
|
ec964b15f0a64bfcb969f46833e9b4bb5a2c3af1
|
[
"MIT"
] | null | null | null |
simple_messages.py
|
Steven-matos/python_projects
|
ec964b15f0a64bfcb969f46833e9b4bb5a2c3af1
|
[
"MIT"
] | null | null | null |
simple_messages.py
|
Steven-matos/python_projects
|
ec964b15f0a64bfcb969f46833e9b4bb5a2c3af1
|
[
"MIT"
] | null | null | null |
# Demonstrate rebinding a module-level name: print two different texts
# through the same variable, leaving it bound to the last one.
for message in ("This is a test at replacing text", "Change of text"):
    print(message)
| 20.4
| 44
| 0.745098
| 16
| 102
| 4.75
| 0.6875
| 0.236842
| 0.421053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156863
| 102
| 5
| 45
| 20.4
| 0.883721
| 0
| 0
| 0.5
| 0
| 0
| 0.446602
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
b1ea2b83601d9b43129b9393bc1f41426c16e4a3
| 47
|
py
|
Python
|
firstpython.py
|
antonioguglielmi/coursera-test
|
ec11b94363a0c7d8f958b933261ebecbdd2a2667
|
[
"Apache-2.0"
] | null | null | null |
firstpython.py
|
antonioguglielmi/coursera-test
|
ec11b94363a0c7d8f958b933261ebecbdd2a2667
|
[
"Apache-2.0"
] | null | null | null |
firstpython.py
|
antonioguglielmi/coursera-test
|
ec11b94363a0c7d8f958b933261ebecbdd2a2667
|
[
"Apache-2.0"
] | null | null | null |
# Display the output
print('New python file!')  # fixed typo in the message: "pyhton" -> "python"
| 15.666667
| 25
| 0.723404
| 7
| 47
| 4.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 47
| 2
| 26
| 23.5
| 0.85
| 0.382979
| 0
| 0
| 0
| 0
| 0.592593
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
b1f6fc4b9a6e713bf647fa28d9ad0a8cfe0b0845
| 54
|
py
|
Python
|
xlab/data/importer/__init__.py
|
dayfine/xlab
|
2c51d84906d5eba568e5b5c70225c2eccb1b9fc3
|
[
"MIT"
] | 2
|
2020-05-06T04:05:30.000Z
|
2020-11-10T16:23:50.000Z
|
xlab/data/importer/__init__.py
|
dayfine/xlab
|
2c51d84906d5eba568e5b5c70225c2eccb1b9fc3
|
[
"MIT"
] | 14
|
2020-05-06T06:37:50.000Z
|
2021-10-30T03:38:05.000Z
|
xlab/data/importer/__init__.py
|
dayfine/xlab
|
2c51d84906d5eba568e5b5c70225c2eccb1b9fc3
|
[
"MIT"
] | null | null | null |
from xlab.data.importer.interface import DataImporter
| 27
| 53
| 0.87037
| 7
| 54
| 6.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 54
| 1
| 54
| 54
| 0.94
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
592eece9536284d3303080e48847c15c46b7e67a
| 93
|
py
|
Python
|
bluzelle/client/__init__.py
|
hhio618/bluezelle-py
|
c38a07458a36305457680196e8c47372008db5ab
|
[
"MIT"
] | 3
|
2021-08-19T10:09:29.000Z
|
2022-01-05T14:19:59.000Z
|
bluzelle/client/__init__.py
|
hhio618/bluzelle-py
|
c38a07458a36305457680196e8c47372008db5ab
|
[
"MIT"
] | null | null | null |
bluzelle/client/__init__.py
|
hhio618/bluzelle-py
|
c38a07458a36305457680196e8c47372008db5ab
|
[
"MIT"
] | null | null | null |
from .query import QueryClient # noqa: F401
from .tx import TransactionClient # noqa: F401
| 31
| 47
| 0.763441
| 12
| 93
| 5.916667
| 0.666667
| 0.225352
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077922
| 0.172043
| 93
| 2
| 48
| 46.5
| 0.844156
| 0.225806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
593cb328465a73ce6aefc327f1377adfc3db71cb
| 141
|
py
|
Python
|
gimme_cached_property/_django.py
|
MichaelKim0407/gimme-cached-property
|
070c2f8b8bd47e2c89355569779ce963d0127350
|
[
"MIT"
] | null | null | null |
gimme_cached_property/_django.py
|
MichaelKim0407/gimme-cached-property
|
070c2f8b8bd47e2c89355569779ce963d0127350
|
[
"MIT"
] | null | null | null |
gimme_cached_property/_django.py
|
MichaelKim0407/gimme-cached-property
|
070c2f8b8bd47e2c89355569779ce963d0127350
|
[
"MIT"
] | null | null | null |
# Optional dependency: use Django's cached_property when Django is
# installed; a None sentinel lets callers feature-detect its absence.
try:
    from django.utils.functional import cached_property as cached_property_django
except ImportError:
    # Django not installed -- callers must check for None before using it.
    cached_property_django = None
| 28.2
| 81
| 0.822695
| 18
| 141
| 6.166667
| 0.666667
| 0.378378
| 0.36036
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141844
| 141
| 4
| 82
| 35.25
| 0.917355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
59496c702005b241757ed06ee054b3c4b1b6887d
| 93
|
py
|
Python
|
backend/apps/yearPlan/admin.py
|
jorgejimenez98/auditorio-django-react
|
ee39fa9aa2874154c113a95b03c3d3e909ffd7ec
|
[
"MIT"
] | null | null | null |
backend/apps/yearPlan/admin.py
|
jorgejimenez98/auditorio-django-react
|
ee39fa9aa2874154c113a95b03c3d3e909ffd7ec
|
[
"MIT"
] | null | null | null |
backend/apps/yearPlan/admin.py
|
jorgejimenez98/auditorio-django-react
|
ee39fa9aa2874154c113a95b03c3d3e909ffd7ec
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import YearPlan

# Expose YearPlan in the Django admin with the default ModelAdmin options.
admin.site.register(YearPlan)
| 18.6
| 32
| 0.827957
| 13
| 93
| 5.923077
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107527
| 93
| 4
| 33
| 23.25
| 0.927711
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3ca0e862843fa36a7692a3260b9b168c13fa3e44
| 83
|
py
|
Python
|
vcap_utils/vcap_utils/__init__.py
|
TheRakeshPurohit/open_vision_capsules
|
2826078d2dd4c21c7725266b80bb145193a25df3
|
[
"BSD-3-Clause"
] | 2
|
2021-03-26T04:55:15.000Z
|
2021-03-26T16:55:06.000Z
|
vcap_utils/vcap_utils/__init__.py
|
TheRakeshPurohit/open_vision_capsules
|
2826078d2dd4c21c7725266b80bb145193a25df3
|
[
"BSD-3-Clause"
] | 3
|
2020-01-22T19:32:42.000Z
|
2020-01-30T20:33:33.000Z
|
vcap_utils/vcap_utils/__init__.py
|
TheRakeshPurohit/open_vision_capsules
|
2826078d2dd4c21c7725266b80bb145193a25df3
|
[
"BSD-3-Clause"
] | null | null | null |
from .version import __version__
from .backends import *
from .algorithms import *
| 20.75
| 32
| 0.795181
| 10
| 83
| 6.2
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144578
| 83
| 3
| 33
| 27.666667
| 0.873239
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a70fe7ca9d39c163c90aebe5ec18c929743797cd
| 178
|
py
|
Python
|
newsplease/pipeline/extractor/extractors/newspaper_extractor_no_images.py
|
freedmand/news-please
|
7b39a76dc77a36df39d9a48b5ce1d96da87efe53
|
[
"Apache-2.0"
] | 1,311
|
2017-02-28T07:26:22.000Z
|
2022-03-31T14:09:54.000Z
|
newsplease/pipeline/extractor/extractors/newspaper_extractor_no_images.py
|
freedmand/news-please
|
7b39a76dc77a36df39d9a48b5ce1d96da87efe53
|
[
"Apache-2.0"
] | 190
|
2016-12-21T11:12:44.000Z
|
2022-02-02T17:58:36.000Z
|
newsplease/pipeline/extractor/extractors/newspaper_extractor_no_images.py
|
freedmand/news-please
|
7b39a76dc77a36df39d9a48b5ce1d96da87efe53
|
[
"Apache-2.0"
] | 366
|
2017-03-01T11:27:37.000Z
|
2022-03-31T02:11:00.000Z
|
from .newspaper_extractor import NewspaperExtractor
class NewspaperExtractorNoImages(NewspaperExtractor):
    """Newspaper-based extractor variant that never downloads images."""

    def _article_kwargs(self):
        # Extra keyword arguments passed to the underlying article object:
        # image fetching is switched off, everything else is inherited.
        return dict(fetch_images=False)
| 25.428571
| 53
| 0.792135
| 16
| 178
| 8.5625
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140449
| 178
| 6
| 54
| 29.666667
| 0.895425
| 0
| 0
| 0
| 0
| 0
| 0.067416
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
a727dd9bca6633a5a7c9c4a88fd4b33a56d7fce0
| 61
|
py
|
Python
|
api/grading/__init__.py
|
robertdroptablestudents/sqlgrader
|
2ddd3d6c47f607df8987a58348b46b11fc7f4e48
|
[
"MIT"
] | 2
|
2021-12-22T14:12:44.000Z
|
2022-01-08T00:21:05.000Z
|
api/grading/__init__.py
|
robertdroptablestudents/sqlgrader
|
2ddd3d6c47f607df8987a58348b46b11fc7f4e48
|
[
"MIT"
] | 8
|
2021-12-13T00:51:17.000Z
|
2021-12-13T01:25:17.000Z
|
api/grading/__init__.py
|
robertdroptablestudents/sqlgrader
|
2ddd3d6c47f607df8987a58348b46b11fc7f4e48
|
[
"MIT"
] | null | null | null |
from .gradingProcess import *
from .gradingUtilities import *
| 30.5
| 31
| 0.819672
| 6
| 61
| 8.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114754
| 61
| 2
| 31
| 30.5
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a738e78b7803d0b0fe08d253f170e363f4ad3b28
| 310
|
py
|
Python
|
nuke_stubs/nuke/nuke_internal/__init__.py
|
sisoe24/Nuke-Python-Stubs
|
79c53cf5cb7b38e15a34fd04f672b143d9d7dc85
|
[
"MIT"
] | 1
|
2022-01-12T01:29:16.000Z
|
2022-01-12T01:29:16.000Z
|
nuke_stubs/nuke/nuke_internal/__init__.py
|
sisoe24/Nuke-Python-Stubs
|
79c53cf5cb7b38e15a34fd04f672b143d9d7dc85
|
[
"MIT"
] | null | null | null |
nuke_stubs/nuke/nuke_internal/__init__.py
|
sisoe24/Nuke-Python-Stubs
|
79c53cf5cb7b38e15a34fd04f672b143d9d7dc85
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2009 The Foundry Visionmongers Ltd. All Rights Reserved.
# get the functions that are compiled into Nuke:
from _nuke import *
from .utils import *
from .callbacks import *
from .colorspaces import *
from .executeInMain import *
from .overrides import *
from .scripts import scriptSaveAndClear
| 31
| 73
| 0.780645
| 40
| 310
| 6.025
| 0.65
| 0.248963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015326
| 0.158065
| 310
| 9
| 74
| 34.444444
| 0.908046
| 0.380645
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
59578d6568dacc57cc904290a1d684c7639ab0ae
| 40
|
py
|
Python
|
lib/pwiki/wikidata/WikiData_original_gadfly.py
|
dszmaj/wikidpad
|
1127375665935524ddb623da8dd5137038c7e53e
|
[
"Apache-2.0",
"MIT"
] | 16
|
2015-02-05T17:32:04.000Z
|
2022-01-14T13:46:36.000Z
|
lib/pwiki/wikidata/WikiData_original_gadfly.py
|
dszmaj/wikidpad
|
1127375665935524ddb623da8dd5137038c7e53e
|
[
"Apache-2.0",
"MIT"
] | 8
|
2015-06-20T20:02:41.000Z
|
2016-02-23T14:52:32.000Z
|
lib/pwiki/wikidata/WikiData_original_gadfly.py
|
dszmaj/wikidpad
|
1127375665935524ddb623da8dd5137038c7e53e
|
[
"Apache-2.0",
"MIT"
] | 11
|
2015-05-19T09:17:16.000Z
|
2017-09-14T00:43:13.000Z
|
from original_gadfly.WikiData import *
| 20
| 39
| 0.825
| 5
| 40
| 6.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 40
| 1
| 40
| 40
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
59624968f04c6ab327a5c7e204dcb390500a3fe5
| 149
|
py
|
Python
|
taric/__init__.py
|
uktrade/tamato
|
4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca
|
[
"MIT"
] | 14
|
2020-03-25T11:11:29.000Z
|
2022-03-08T20:41:33.000Z
|
taric/__init__.py
|
uktrade/tamato
|
4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca
|
[
"MIT"
] | 352
|
2020-03-25T10:42:09.000Z
|
2022-03-30T15:32:26.000Z
|
taric/__init__.py
|
uktrade/tamato
|
4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca
|
[
"MIT"
] | 3
|
2020-08-06T12:22:41.000Z
|
2022-01-16T11:51:12.000Z
|
"""Implements models used by the :mod:`exporter` to keep track of which
envelopes the system has generated and which transactions they contained."""
| 49.666667
| 76
| 0.785235
| 22
| 149
| 5.318182
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14094
| 149
| 2
| 77
| 74.5
| 0.914063
| 0.95302
| 0
| null | 1
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
599a0554cf11bbca37e75775313f537fca42cfdb
| 18
|
py
|
Python
|
my_classes/.history/ModulesPackages_PackageNamespaces/main_20210725190128.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
my_classes/.history/ModulesPackages_PackageNamespaces/main_20210725190128.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
my_classes/.history/ModulesPackages_PackageNamespaces/main_20210725190128.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
# main.py
# Placeholder entry point: emits a single blank line.
print()
| 6
| 9
| 0.611111
| 3
| 18
| 3.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 18
| 3
| 10
| 6
| 0.733333
| 0.388889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
59c9869f74391b197c389d3bfb7ba4f7bcf5cf3e
| 333
|
py
|
Python
|
mws/parsers/__init__.py
|
samedayshipping/python3-amazon-mws
|
ead208acbd89ae174f5262c02d6cf5351b5fef61
|
[
"Unlicense"
] | 1
|
2018-04-05T13:54:05.000Z
|
2018-04-05T13:54:05.000Z
|
mws/parsers/__init__.py
|
samedayshipping/python3-amazon-mws
|
ead208acbd89ae174f5262c02d6cf5351b5fef61
|
[
"Unlicense"
] | null | null | null |
mws/parsers/__init__.py
|
samedayshipping/python3-amazon-mws
|
ead208acbd89ae174f5262c02d6cf5351b5fef61
|
[
"Unlicense"
] | null | null | null |
from .products import GetMatchingProductForIdResponse, GetCompetitivePricingForAsinResponse
from .errors import ErrorResponse, ProductError
from .fulfillment import ListInboundShipmentItemsResponse, ListInboundShipmentResponse
from .orders import ListOrdersResponse, ListOrderItemsResponse
from .reports import RequestReportResponse
| 55.5
| 91
| 0.900901
| 24
| 333
| 12.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072072
| 333
| 5
| 92
| 66.6
| 0.970874
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
59d24bb3f4f415cd4082c3bf0bceb1b448cb45d0
| 79
|
py
|
Python
|
BOJ2884.py
|
INYEONGKIM/BOJ
|
5e83d77a92d18b0d20d26645c7cfe4ba3e2d25bc
|
[
"MIT"
] | 2
|
2019-03-05T15:42:46.000Z
|
2019-07-24T15:52:36.000Z
|
BOJ2884.py
|
INYEONGKIM/BOJ
|
5e83d77a92d18b0d20d26645c7cfe4ba3e2d25bc
|
[
"MIT"
] | null | null | null |
BOJ2884.py
|
INYEONGKIM/BOJ
|
5e83d77a92d18b0d20d26645c7cfe4ba3e2d25bc
|
[
"MIT"
] | null | null | null |
# Read a clock time "H M", subtract 45 minutes with 24-hour wrap-around,
# and print the resulting hour and minute (Baekjoon problem 2884).
hour, minute = map(int, input().split())
total_minutes = hour * 60 + minute - 45
if total_minutes < 0:
    # Went past midnight: wrap into the previous day.
    total_minutes += 24 * 60
print(total_minutes // 60, total_minutes % 60)
| 15.8
| 39
| 0.556962
| 21
| 79
| 2.095238
| 0.571429
| 0.204545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185714
| 0.113924
| 79
| 4
| 40
| 19.75
| 0.442857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ab70750df578368e1e4842c61421f8427fb31a32
| 4,308
|
py
|
Python
|
FVAScripts/fixCLT2Issues.py
|
dimanikulin/fva
|
372c91edd2d188ab9fe73b8f83e00a68fc3fb76e
|
[
"RSA-MD"
] | null | null | null |
FVAScripts/fixCLT2Issues.py
|
dimanikulin/fva
|
372c91edd2d188ab9fe73b8f83e00a68fc3fb76e
|
[
"RSA-MD"
] | 10
|
2022-02-06T15:18:17.000Z
|
2022-02-22T20:10:20.000Z
|
FVAScripts/fixCLT2Issues.py
|
dimanikulin/fva
|
372c91edd2d188ab9fe73b8f83e00a68fc3fb76e
|
[
"RSA-MD"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import csv
import subprocess
import os, stat
#fixCLT2Issues.py issues.csv FVA_ERROR_MISMATCH_TAKEN_TIME >> fixedMismatchTakenTime.txt
#fixCLT2Issues.py issues.csv FVA_ERROR_NULL_TAKEN_TIME >> fixedEmptyTakenTime.txt
#fixCLT2Issues.py issues.csv FVA_ERROR_NO_DEV_ID >> fixedNotLinkedDevId.txt
#fixCLT2Issues.py issues.csv FVA_ERROR_EMPTY_DEVICE >> fixedEmptyDevice.txt
#fixCLT2Issues.py issues.csv FVA_ERROR_UKNOWN_DEVICE >> fixedUknownDevice.txt
#fixCLT2Issues.py issues.csv FVA_ERROR_LINKED_WRONG_DEVICE >> fixedLinkedWrongDevice.txt
with open(sys.argv[1], newline='', encoding='utf-8') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in spamreader:
#yield [unicode(cell, 'utf-8') for cell in row]
if row[0] == sys.argv[2] and row[0] == 'FVA_ERROR_MISMATCH_TAKEN_TIME':
print(', '.join(row))
# converting into yyyy:mm:dd-hh:mm:ss
tsParameter = list(row[2])
tsParameter[4] = ':'
tsParameter[7] = ':'
tsParameter[13] = ':'
tsParameter[16] = ':'
if tsParameter[11] == '#':
tsParameter[11] = '0'
if tsParameter[12] == '#':
tsParameter[12] = '1'
if tsParameter[14] == '#':
tsParameter[14] = '0'
if tsParameter[15] == '#':
tsParameter[15] = '1'
if tsParameter[17] == '#':
tsParameter[17] = '0'
if tsParameter[18] == '#':
tsParameter[18] = '1'
#print ('-ts' + ''.join(tsParameter))
os.chmod(row[1], stat.S_IWRITE) # clear read only file attribute
subprocess.call(['../jhead.exe', '-ts' + ''.join(tsParameter) , row[1]])
if row[0] == sys.argv[2] and row[0] == 'FVA_ERROR_NULL_TAKEN_TIME':
#print(', '.join(row))
# converting into yyyy:mm:dd-hh:mm:ss
tsParameter = list(row[2])
tsParameter[4] = ':'
tsParameter[7] = ':'
tsParameter[13] = ':'
tsParameter[16] = ':'
if tsParameter[11] == '#':
tsParameter[11] = '0'
if tsParameter[12] == '#':
tsParameter[12] = '1'
if tsParameter[14] == '#':
tsParameter[14] = '0'
if tsParameter[15] == '#':
tsParameter[15] = '1'
if tsParameter[17] == '#':
tsParameter[17] = '0'
if tsParameter[18] == '#':
tsParameter[18] = '1'
#print ('-ts' + ''.join(tsParameter))
os.chmod(row[1], stat.S_IWRITE) # clear read only file attribute
subprocess.call(['../jhead.exe', '-mkexif', row[1]])
subprocess.call(['../jhead.exe', '-rgt', row[1]])
subprocess.call(['../jhead.exe', '-ts' + ''.join(tsParameter) , row[1]])
if row[0] == sys.argv[2] and row[0] == 'FVA_ERROR_NO_DEV_ID':
print(', '.join(row))
if row[0] == sys.argv[2] and row[0] == 'FVA_ERROR_EMPTY_DEVICE' and row[2] == '177':
#print(', '.join(row))
os.chmod(row[1], stat.S_IWRITE) # clear read only file attribute
#subprocess.call(['../jhead.exe', "-te", "source.JPG" , row[1]])
#subprocess.call(['../jhead.exe', '-rgt', row[1]])
print(row[1])
subprocess.call(['../exiftool(-k).exe','-model=Era Nano 6','-make=Fly', row[1]])
if row[0] == sys.argv[2] and row[0] == 'FVA_ERROR_UKNOWN_DEVICE' and row[2] == '163':
#print(', '.join(row))
os.chmod(row[1], stat.S_IWRITE) # clear read only file attribute
subprocess.call(['../jhead.exe', "-te", "source.JPG" , row[1]])
#subprocess.call(['../jhead.exe', '-rgt', row[1]])
#subprocess.call(['../exiftool(-k).exe','-model=Era Nano 6','-make=Fly', row[1]])
if row[0] == sys.argv[2] and row[0] == 'FVA_ERROR_LINKED_WRONG_DEVICE' and row[2] == '3':
#print(', '.join(row))
os.chmod(row[1], stat.S_IWRITE) # clear read only file attribute
subprocess.call(['../jhead.exe', "-te", "source.JPG" , row[1]])
subprocess.call(['../jhead.exe', '-rgt', row[1]])
| 46.826087
| 97
| 0.528319
| 507
| 4,308
| 4.392505
| 0.205128
| 0.03233
| 0.085317
| 0.098788
| 0.819039
| 0.778626
| 0.764257
| 0.685676
| 0.685676
| 0.685676
| 0
| 0.042983
| 0.28714
| 4,308
| 91
| 98
| 47.340659
| 0.682188
| 0.271588
| 0
| 0.746032
| 0
| 0
| 0.119859
| 0.041131
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.063492
| 0
| 0.063492
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ab7eeff391aad1e9e2295e716ef8db5a21345af4
| 205
|
py
|
Python
|
webauthn/helpers/bytes_to_base64url.py
|
MasterKale/py_webauthn
|
fe97b9841328aa84559bd2a282c07d20145845c1
|
[
"BSD-3-Clause"
] | 409
|
2018-03-01T20:14:27.000Z
|
2022-03-26T21:48:35.000Z
|
webauthn/helpers/bytes_to_base64url.py
|
MasterKale/py_webauthn
|
fe97b9841328aa84559bd2a282c07d20145845c1
|
[
"BSD-3-Clause"
] | 95
|
2018-05-02T03:28:57.000Z
|
2022-03-31T18:33:20.000Z
|
webauthn/helpers/bytes_to_base64url.py
|
MasterKale/py_webauthn
|
fe97b9841328aa84559bd2a282c07d20145845c1
|
[
"BSD-3-Clause"
] | 117
|
2018-03-08T16:39:11.000Z
|
2022-03-24T18:24:16.000Z
|
from base64 import urlsafe_b64encode
def bytes_to_base64url(val: bytes) -> str:
    """Base64URL-encode *val*, dropping the trailing ``=`` padding."""
    encoded = urlsafe_b64encode(val).decode("utf-8")
    # Padding '=' only ever appears at the end of base64 output, so
    # stripping it from the right removes exactly what replace() removed.
    return encoded.rstrip("=")
| 22.777778
| 66
| 0.678049
| 25
| 205
| 5.4
| 0.76
| 0.237037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065089
| 0.17561
| 205
| 8
| 67
| 25.625
| 0.733728
| 0.170732
| 0
| 0
| 0
| 0
| 0.038961
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
aba5b946ced4c64ab83fdbb1efe9ccd1e8318571
| 166
|
py
|
Python
|
estimagic/tests/conftest.py
|
RobinMusolff/estimagic
|
615da47e125e9314c542bda0b55bf92bc6bf1863
|
[
"BSD-3-Clause"
] | null | null | null |
estimagic/tests/conftest.py
|
RobinMusolff/estimagic
|
615da47e125e9314c542bda0b55bf92bc6bf1863
|
[
"BSD-3-Clause"
] | null | null | null |
estimagic/tests/conftest.py
|
RobinMusolff/estimagic
|
615da47e125e9314c542bda0b55bf92bc6bf1863
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
@pytest.fixture(scope="function", autouse=True)
def fresh_directory(tmpdir):
    """Each test is executed in a fresh directory."""
    # autouse=True applies this to every test without it being requested;
    # tmpdir is pytest's per-test temporary directory, so chdir-ing into it
    # isolates any files a test writes from the repo and from other tests.
    tmpdir.chdir()
| 20.75
| 53
| 0.716867
| 22
| 166
| 5.363636
| 0.818182
| 0.237288
| 0.338983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150602
| 166
| 7
| 54
| 23.714286
| 0.836879
| 0.259036
| 0
| 0
| 0
| 0
| 0.068376
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
abdc0f17d44fbd716dffd33bece35c0f8879e9a7
| 5,066
|
py
|
Python
|
code/pyto/analysis/test/test_experiment.py
|
anmartinezs/pyseg_system
|
5bb07c7901062452a34b73f376057cabc15a13c3
|
[
"Apache-2.0"
] | 12
|
2020-01-08T01:33:02.000Z
|
2022-03-16T00:25:34.000Z
|
code/pyto/analysis/test/test_experiment.py
|
anmartinezs/pyseg_system
|
5bb07c7901062452a34b73f376057cabc15a13c3
|
[
"Apache-2.0"
] | 8
|
2019-12-19T19:34:56.000Z
|
2022-03-10T10:11:28.000Z
|
code/pyto/analysis/test/test_experiment.py
|
anmartinezs/pyseg_system
|
5bb07c7901062452a34b73f376057cabc15a13c3
|
[
"Apache-2.0"
] | 2
|
2022-03-30T13:12:22.000Z
|
2022-03-30T18:12:10.000Z
|
"""
Tests module experiments
# Author: Vladan Lucic
# $Id$
"""
from __future__ import unicode_literals
__version__ = "$Revision$"
import sys
from copy import copy, deepcopy
import pickle
import os.path
import unittest
import numpy
import numpy.testing as np_test
import scipy
from pyto.analysis.experiment import Experiment
class TestExperiment(np_test.TestCase):
    """
    """

    def setUp(self):
        """
        """
        pass

    def testTransformByIds(self):
        """
        Tests transformByIds()
        """

        # same ids: pure reordering of values from old_ids order to new_ids
        old_ids = [2, 1, 4, 3]
        old_values = [20, 10, 40, 30]
        new_ids = [3, 2, 1, 4]
        exp = Experiment()
        new_values = exp.transformByIds(
            ids=old_ids, new_ids=new_ids, values=old_values, default=100)
        np_test.assert_equal(new_values, [30, 20, 10, 40])

        # different ids: ids missing from old_ids get the `default` value
        old_ids = [4, 1, 7, 6]
        old_values = [40, 10, 70, 60]
        new_ids = [4, 2, 1, 5, 7]
        exp = Experiment()
        new_values = exp.transformByIds(
            ids=old_ids, new_ids=new_ids, values=old_values, default=100)
        np_test.assert_equal(new_values, [40, 100, 10, 100, 70])

        # same ids square form: values is an NxN matrix, rows and columns
        # are permuted together (element [i][j] follows ids i and j)
        old_ids = [2, 1, 5, 3]
        new_ids = [3, 2, 1, 5]
        old_values = numpy.array([[22, 21, 25, 23],
                                  [12, 11, 15, 13],
                                  [52, 51, 55, 53],
                                  [32, 31, 35, 33]])
        exp = Experiment()
        new_values = exp.transformByIds(
            ids=old_ids, new_ids=new_ids, values=old_values, default=100,
            mode='square')
        np_test.assert_equal(new_values,
                             numpy.array([[33, 32, 31, 35],
                                          [23, 22, 21, 25],
                                          [13, 12, 11, 15],
                                          [53, 52, 51, 55]]))

        # different ids square form: rows/cols for ids absent from old_ids
        # are filled with `default`
        old_ids = [2, 1, 5, 3]
        new_ids = [3, 4, 1]
        old_values = numpy.array([[22, 21, 25, 23],
                                  [12, 11, 15, 13],
                                  [52, 51, 55, 53],
                                  [32, 31, 35, 33]])
        exp = Experiment()
        new_values = exp.transformByIds(
            ids=old_ids, new_ids=new_ids, values=old_values, default=100,
            mode='square')
        np_test.assert_equal(new_values,
                             numpy.array([[33, 100, 31],
                                          [100, 100, 100],
                                          [13, 100, 11]]))

        # different ids square form (id 6 also absent from old_ids)
        old_ids = [2, 1, 5, 3]
        new_ids = [3, 6, 1]
        old_values = numpy.array([[22, 21, 25, 23],
                                  [12, 11, 15, 13],
                                  [52, 51, 55, 53],
                                  [32, 31, 35, 33]])
        exp = Experiment()
        new_values = exp.transformByIds(
            ids=old_ids, new_ids=new_ids, values=old_values, default=100,
            mode='square')
        np_test.assert_equal(new_values,
                             numpy.array([[33, 100, 31],
                                          [100, 100, 100],
                                          [13, 100, 11]]))

        # same ids vector_pair form: values is the condensed (upper-triangle)
        # pair vector over old_ids
        old_ids = [2, 1, 5, 3]
        new_ids = [3, 2, 1, 5]
        old_values = numpy.array([21, 25, 23, 15, 13, 53])
        exp = Experiment()
        new_values = exp.transformByIds(
            ids=old_ids, new_ids=new_ids, values=old_values,
            mode='vector_pair')
        np_test.assert_equal(new_values, [23, 13, 53, 21, 25, 15])

        # different ids vector_pair form
        # NOTE(review): -1 appears to be transformByIds' implicit default
        # for missing pairs in vector_pair mode when no `default` is given
        # -- confirm against Experiment.transformByIds.
        old_ids = [2, 1, 7, 4]
        new_ids = [4, 3, 2, 1]
        old_values = numpy.array([21, 72, 24, 17, 14, 74])
        exp = Experiment()
        new_values = exp.transformByIds(
            ids=old_ids, new_ids=new_ids, values=old_values,
            mode='vector_pair')
        np_test.assert_equal(new_values, [-1, 24, 14, -1, -1, 21])

        # different ids vector_pair form (id 8 absent from old_ids)
        old_ids = [2, 1, 7, 4]
        new_ids = [4, 8, 2, 1]
        old_values = numpy.array([21, 72, 24, 17, 14, 74])
        exp = Experiment()
        new_values = exp.transformByIds(
            ids=old_ids, new_ids=new_ids, values=old_values,
            mode='vector_pair')
        np_test.assert_equal(new_values, [-1, 24, 14, -1, -1, 21])

        # different ids vector_pair form: no overlap at all, every pair
        # takes the explicit `default`
        old_ids = [2, 1, 7, 4]
        new_ids = [5, 8, 21]
        old_values = numpy.array([21, 72, 24, 17, 14, 74])
        exp = Experiment()
        new_values = exp.transformByIds(
            ids=old_ids, new_ids=new_ids, values=old_values, default=100,
            mode='vector_pair')
        np_test.assert_equal(new_values, [100, 100, 100])
if __name__ == '__main__':
    # Allow running this test module directly: python test_experiment.py
    suite = unittest.TestLoader().loadTestsFromTestCase(TestExperiment)
    unittest.TextTestRunner(verbosity=2).run(suite)
| 33.549669
| 74
| 0.487169
| 624
| 5,066
| 3.753205
| 0.160256
| 0.069172
| 0.069172
| 0.084543
| 0.729718
| 0.725021
| 0.725021
| 0.725021
| 0.719471
| 0.704099
| 0
| 0.125324
| 0.390446
| 5,066
| 150
| 75
| 33.773333
| 0.633096
| 0.058231
| 0
| 0.64486
| 0
| 0
| 0.017003
| 0
| 0
| 0
| 0
| 0
| 0.084112
| 1
| 0.018692
| false
| 0.009346
| 0.093458
| 0
| 0.121495
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
abdf2260db9be1fcc2e2f025d8aa0306d3f212e2
| 128
|
py
|
Python
|
raylab/agents/acktr/__init__.py
|
angelolovatto/raylab
|
ebaea8df1a391fb844e75df62ccf1e2e07311d88
|
[
"MIT"
] | 29
|
2020-05-05T13:25:33.000Z
|
2022-01-03T14:12:29.000Z
|
raylab/agents/acktr/__init__.py
|
angelolovatto/raylab
|
ebaea8df1a391fb844e75df62ccf1e2e07311d88
|
[
"MIT"
] | 215
|
2019-11-26T12:59:39.000Z
|
2022-02-01T12:38:31.000Z
|
raylab/agents/acktr/__init__.py
|
angelolovatto/raylab
|
ebaea8df1a391fb844e75df62ccf1e2e07311d88
|
[
"MIT"
] | 7
|
2020-06-12T01:42:02.000Z
|
2021-05-27T03:40:42.000Z
|
"""Actor Critic using Kronecker-factored Trust Region"""
from .policy import ACKTRTorchPolicy
from .trainer import ACKTRTrainer
| 32
| 56
| 0.820313
| 15
| 128
| 7
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 128
| 3
| 57
| 42.666667
| 0.921053
| 0.390625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
abe85f637b2bd77629f8fda5911806573b3ab867
| 207
|
py
|
Python
|
Stock/admin.py
|
daubers/ComponentStock
|
623d256fe66c3b1e407e01c7284ed7ca5868600d
|
[
"MIT"
] | 1
|
2015-05-16T10:44:42.000Z
|
2015-05-16T10:44:42.000Z
|
Stock/admin.py
|
daubers/ComponentStock
|
623d256fe66c3b1e407e01c7284ed7ca5868600d
|
[
"MIT"
] | null | null | null |
Stock/admin.py
|
daubers/ComponentStock
|
623d256fe66c3b1e407e01c7284ed7ca5868600d
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Fixed: the original "from models import ..." is a Python-2-only implicit
# relative import; Python 3 requires the explicit relative form (this also
# matches the style used by the other admin modules in this dump).
from .models import Stock, PurchaseOrder, PurchaseOrderToComponent

# Register each stock-tracking model with the default ModelAdmin.
admin.site.register(Stock)
admin.site.register(PurchaseOrder)
admin.site.register(PurchaseOrderToComponent)
| 34.5
| 65
| 0.864734
| 23
| 207
| 7.782609
| 0.478261
| 0.150838
| 0.284916
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.062802
| 207
| 6
| 66
| 34.5
| 0.92268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
abfb20c20d7ad0c5e3ce2e7b0b3ff134e162b2e9
| 147
|
py
|
Python
|
src/temperature/actions.py
|
jiaweih/Temperature
|
7b30cca1bf2d02084c8c584a8cffe023020c73c7
|
[
"MIT"
] | null | null | null |
src/temperature/actions.py
|
jiaweih/Temperature
|
7b30cca1bf2d02084c8c584a8cffe023020c73c7
|
[
"MIT"
] | null | null | null |
src/temperature/actions.py
|
jiaweih/Temperature
|
7b30cca1bf2d02084c8c584a8cffe023020c73c7
|
[
"MIT"
] | 2
|
2020-06-19T22:34:27.000Z
|
2022-02-11T19:48:11.000Z
|
# -*- coding: utf-8 -*-
"""
actions
~~~~~~~
Contains all the actions w.r.t. slices and the surface.
"""
import mtslice
import surface
| 14.7
| 59
| 0.585034
| 19
| 147
| 4.526316
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008929
| 0.238095
| 147
| 9
| 60
| 16.333333
| 0.758929
| 0.646259
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
abfc62eff20393d9f4e48dfd9ca9bca8f5d70510
| 137
|
py
|
Python
|
sveder/__init__.py
|
Sveder/import-sveder
|
5a53a53daeaeca1ac870433bc3ad033f8e5d346d
|
[
"Apache-2.0"
] | 1
|
2021-04-08T14:19:54.000Z
|
2021-04-08T14:19:54.000Z
|
sveder/__init__.py
|
Sveder/import-sveder
|
5a53a53daeaeca1ac870433bc3ad033f8e5d346d
|
[
"Apache-2.0"
] | null | null | null |
sveder/__init__.py
|
Sveder/import-sveder
|
5a53a53daeaeca1ac870433bc3ad033f8e5d346d
|
[
"Apache-2.0"
] | null | null | null |
import webbrowser


def open_sveder_com():
    """Open http://sveder.com in a new browser window."""
    webbrowser.open_new("http://sveder.com")


if __name__ == "__main__":
    open_sveder_com()
| 17.125
| 44
| 0.715328
| 18
| 137
| 4.722222
| 0.611111
| 0.317647
| 0.305882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145985
| 137
| 7
| 45
| 19.571429
| 0.726496
| 0
| 0
| 0
| 0
| 0
| 0.182482
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.2
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f9fc6cecf81bb7f0e5ab9aa4ce03c7e1493dc977
| 37
|
py
|
Python
|
datasets/__init__.py
|
willianrampazzo/pycoach
|
24146a191c9e1d78811f9acb3b56c8520da34fcb
|
[
"MIT"
] | 1
|
2017-11-10T18:36:22.000Z
|
2017-11-10T18:36:22.000Z
|
datasets/__init__.py
|
willianrampazzo/pycoach
|
24146a191c9e1d78811f9acb3b56c8520da34fcb
|
[
"MIT"
] | null | null | null |
datasets/__init__.py
|
willianrampazzo/pycoach
|
24146a191c9e1d78811f9acb3b56c8520da34fcb
|
[
"MIT"
] | null | null | null |
# Public API of the datasets package.
# NOTE(review): FacialKeypointsDataset is not defined in this file; it is
# presumably re-exported from a sibling module — confirm against the package.
__all__ = ["FacialKeypointsDataset"]
| 18.5
| 36
| 0.783784
| 2
| 37
| 12.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 37
| 1
| 37
| 37
| 0.735294
| 0
| 0
| 0
| 0
| 0
| 0.594595
| 0.594595
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e61571fd12993704ed1d90d0af2c5b38fe41bc3f
| 2,313
|
py
|
Python
|
parse midi.py
|
robbiebarrat/Bach_AI
|
0fb82302c50aeed9ad718b3d71f9debbaf54d1aa
|
[
"MIT"
] | 126
|
2016-07-17T21:18:23.000Z
|
2022-03-14T23:45:35.000Z
|
parse midi.py
|
robbiebarrat/Bach_AI
|
0fb82302c50aeed9ad718b3d71f9debbaf54d1aa
|
[
"MIT"
] | 1
|
2018-10-26T15:05:24.000Z
|
2018-10-26T15:05:24.000Z
|
parse midi.py
|
robbiebarrat/Bach_AI
|
0fb82302c50aeed9ad718b3d71f9debbaf54d1aa
|
[
"MIT"
] | 21
|
2016-07-18T06:16:07.000Z
|
2022-01-22T01:43:39.000Z
|
# Render a hard-coded note sequence ("songdata") into a single-track MIDI
# file via midiutil.  Each songdata entry is a 5-element list; this script
# uses only the first two fields:
#   note[0] -> pitch offset added to middle C (60)
#   note[1] -> duration in eighth-note units
# The remaining three fields are ignored here.
from midiutil.MidiFile import MIDIFile

MyMIDI = MIDIFile(1)
track = 0
time = 0
MyMIDI.addTrackName(track, time, "Sample Track")
MyMIDI.addTempo(track, time, 120)
songdata = [[3, 1, 1, 1, 0], [3, 12, 1, 1, 0], [27, 3, 0, 2, 0], [26, 11, 0, 2, 0], [19, 3, -1, 1, 0], [18, 5, -1, 1, 0], [16, 4, 0, 1, 0], [18, 6, 0, 1, 0], [16, 0, 2, 1, 0], [19, 4, 2, 1, 0], [9, 2, 2, 0, 0], [13, 3, 2, 0, 0], [8, 4, 1, 0, 0], [10, 5, 1, 0, 0], [7, 3, 1, 0, 0], [9, 7, 1, 0, 0], [6, 3, 1, 0, 0], [9, 6, 1, 0, 0], [7, 3, 2, 0, 0], [9, 6, 2, 0, 0], [7, 0, 1, 0, 0], [8, 4, 1, 0, 0], [22, 3, 0, 1, 0], [8, 4, 0, 1, 0], [27, 1, -1, 1, 0], [13, 4, -1, 1, 0], [29, 4, 0, 1, 0], [20, 3, 0, 1, 0], [26, 9, -3, 0, 0], [16, 6, -3, 0, 0], [28, 6, -3, 1, 0], [20, 7, -3, 1, 0], [14, 4, -1, 1, 0], [14, 6, -1, 1, 0], [13, 4, -1, 1, 0], [10, 5, -1, 1, 0], [12, 4, -1, 1, 0], [8, 5, -1, 1, 0], [12, 4, 0, 1, 0], [8, 6, 0, 1, 0], [9, 3, 0, 1, 0], [3, 5, 0, 1, 0], [9, 4, 2, 1, 0], [4, 5, 2, 1, 0], [11, 4, 1, 1, 0], [6, 5, 1, 1, 0], [11, 6, 0, 1, 0], [12, 6, 0, 1, 0], [8, 5, 0, 1, 0], [6, 7, 0, 1, 0], [8, 5, 0, 1, 0], [4, 7, 0, 1, 0], [11, 6, 0, 1, 0], [5, 8, 0, 1, 0], [12, 7, 0, 1, 0], [6, 8, 0, 1, 0], [11, 8, 0, 1, 0], [5, 8, 0, 1, 0], [7, 8, 0, 0, 0], [4, 8, 0, 0, 0], [6, 9, 0, 0, 0], [3, 7, 0, 0, 0], [3, 10, 0, 0, 0], [2, 5, 0, 0, 0], [2, 9, 0, 0, 0], [0, 5, 0, 0, 0], [1, 9, 0, 0, 0], [0, 4, 0, 0, 0], [1, 9, 0, 0, 0], [0, 4, 0, 0, 0], [1, 8, 0, 0, 0], [0, 4, 0, 0, 0], [3, 8, 0, 0, 0], [3, 2, 0, 0, 0], [1, 6, 2, 0, 0], [-1, 3, 2, 0, 0], [4, 3, 1, 0, 0], [0, 3, 1, 0, 0], [5, 6, -1, 1, 0], [-4, 7, -1, 1, 0], [6, 5, -1, 1, 0], [1, 9, -1, 1, 0], [17, 3, -1, 1, 0], [14, 5, -1, 1, 0], [18, 4, -2, 0, 0], [18, 5, -2, 0, 0], [18, 8, -2, 1, 0], [17, 7, -2, 1, 0], [25, 13, -3, 0, 0], [25, 6, -3, 0, 0], [25, 10, -2, 0, 0], [26, 7, -2, 0, 0], [22, 4, 0, 0, 0], [13, 9, 0, 0, 0], [19, 0, 0, 0, 0], [11, 5, 0, 0, 0], [21, 1, 0, 0, 0], [13, 6, 0, 0, 0], [8, 5, 0, 0, 0], [7, 7, 0, 0, 0]]
for note in songdata:
    pitch = note[0] + 60
    # Clamp the pitch into the two-octave range [60, 84].
    if pitch < 60:
        pitch = 60
    if pitch > 84:
        pitch = 84  # BUG FIX: was 80, which undershot the stated 84 ceiling — confirm intent
    duration = note[1] / 8.0
    if duration == 0:
        # Zero-length notes would be inaudible; give them one full beat.
        duration = 1
    time += duration
    print(time)
    MyMIDI.addNote(0, 0, pitch, time, duration, 100)
# 'with' guarantees the handle is closed even if writeFile raises.
with open("song.mid", 'wb') as binfile:
    MyMIDI.writeFile(binfile)
| 82.607143
| 1,793
| 0.384782
| 574
| 2,313
| 1.550523
| 0.101045
| 0.177528
| 0.101124
| 0.026966
| 0.180899
| 0.153933
| 0.138202
| 0.110112
| 0.026966
| 0.026966
| 0
| 0.343213
| 0.270644
| 2,313
| 27
| 1,794
| 85.666667
| 0.184351
| 0
| 0
| 0
| 0
| 0
| 0.009511
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.045455
| null | null | 0.045455
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e62bdd455d69861526bf8f203c61941012dcb850
| 23
|
py
|
Python
|
Courses/HSEPython/1 week/5.py
|
searayeah/sublime-snippets
|
deff53a06948691cd5e5d7dcfa85515ddd8fab0b
|
[
"MIT"
] | null | null | null |
Courses/HSEPython/1 week/5.py
|
searayeah/sublime-snippets
|
deff53a06948691cd5e5d7dcfa85515ddd8fab0b
|
[
"MIT"
] | null | null | null |
Courses/HSEPython/1 week/5.py
|
searayeah/sublime-snippets
|
deff53a06948691cd5e5d7dcfa85515ddd8fab0b
|
[
"MIT"
] | null | null | null |
# Read an integer exponent from stdin and print the corresponding power of two.
exponent = int(input())
print(2 ** exponent)
| 11.5
| 22
| 0.608696
| 4
| 23
| 3.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.043478
| 23
| 1
| 23
| 23
| 0.590909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
e64a911b30b0bb5c999e1ca2e6b326b016135501
| 95
|
py
|
Python
|
src/biopy/training/__init__.py
|
BioPyTeam/biopy
|
5c1444280d0a5098b61a99d96dc2825259c7ced5
|
[
"MIT"
] | null | null | null |
src/biopy/training/__init__.py
|
BioPyTeam/biopy
|
5c1444280d0a5098b61a99d96dc2825259c7ced5
|
[
"MIT"
] | null | null | null |
src/biopy/training/__init__.py
|
BioPyTeam/biopy
|
5c1444280d0a5098b61a99d96dc2825259c7ced5
|
[
"MIT"
] | 2
|
2021-07-23T09:30:58.000Z
|
2021-07-23T09:33:25.000Z
|
# Package entry point for the training subpackage.
# NOTE(review): __all__ exports the name 'trainers', but this module imports
# Trainer and ThanosTrainer — confirm whether 'trainers' is a submodule meant
# for wildcard re-export or whether the class names were intended here.
__all__ = ['trainers']
from .trainer import Trainer
from .trainer_wrapper import ThanosTrainer
| 23.75
| 42
| 0.810526
| 11
| 95
| 6.545455
| 0.636364
| 0.305556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115789
| 95
| 4
| 42
| 23.75
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e664369b9ac7553be414428857ead57ec12f5fb7
| 171
|
py
|
Python
|
zero/core/catalogue/__init__.py
|
jsa4000/OpenGL-Python
|
62055ba0c16f54507b7ba709d6691b2e9c7bc152
|
[
"Apache-2.0"
] | null | null | null |
zero/core/catalogue/__init__.py
|
jsa4000/OpenGL-Python
|
62055ba0c16f54507b7ba709d6691b2e9c7bc152
|
[
"Apache-2.0"
] | null | null | null |
zero/core/catalogue/__init__.py
|
jsa4000/OpenGL-Python
|
62055ba0c16f54507b7ba709d6691b2e9c7bc152
|
[
"Apache-2.0"
] | 1
|
2021-10-08T06:12:11.000Z
|
2021-10-08T06:12:11.000Z
|
from __future__ import absolute_import, division, print_function
from .catalogue import Catalogue, CatalogueGroup
from .collections import CatalogueDict, CatalogueTree
| 24.428571
| 64
| 0.853801
| 18
| 171
| 7.777778
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 171
| 6
| 65
| 28.5
| 0.921053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0589ef7a3f312e2956f8548d117ac563e26d1c33
| 30
|
py
|
Python
|
i3pyblocks/__version__.py
|
mgrechukh/i3pyblocks
|
8a637b3ebbced89078bee37fdd461f599bdf5b8e
|
[
"MIT"
] | null | null | null |
i3pyblocks/__version__.py
|
mgrechukh/i3pyblocks
|
8a637b3ebbced89078bee37fdd461f599bdf5b8e
|
[
"MIT"
] | null | null | null |
i3pyblocks/__version__.py
|
mgrechukh/i3pyblocks
|
8a637b3ebbced89078bee37fdd461f599bdf5b8e
|
[
"MIT"
] | 1
|
2021-11-07T17:33:06.000Z
|
2021-11-07T17:33:06.000Z
|
# Package version, CalVer-style (YYYY.MM.patch) with a -dev pre-release marker.
__version__ = "2020.10.3-dev"
| 15
| 29
| 0.7
| 5
| 30
| 3.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.259259
| 0.1
| 30
| 1
| 30
| 30
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0.433333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
552c322685f4b756aaa4fddf85a9f3c24314b79d
| 231,706
|
py
|
Python
|
boto3_type_annotations_with_docs/boto3_type_annotations/storagegateway/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 119
|
2018-12-01T18:20:57.000Z
|
2022-02-02T10:31:29.000Z
|
boto3_type_annotations_with_docs/boto3_type_annotations/storagegateway/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 15
|
2018-11-16T00:16:44.000Z
|
2021-11-13T03:44:18.000Z
|
boto3_type_annotations_with_docs/boto3_type_annotations/storagegateway/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 11
|
2019-05-06T05:26:51.000Z
|
2021-09-28T15:27:59.000Z
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def activate_gateway(self, ActivationKey: str, GatewayName: str, GatewayTimezone: str, GatewayRegion: str, GatewayType: str = None, TapeDriveType: str = None, MediumChangerType: str = None, Tags: List = None) -> Dict:
"""
Activates the gateway you previously deployed on your host. In the activation process, you specify information such as the region you want to use for storing snapshots or tapes, the time zone for scheduled snapshots the gateway snapshot schedule window, an activation key, and a name for your gateway. The activation process also associates your gateway with your account; for more information, see UpdateGatewayInformation .
.. note::
You must turn on the gateway VM before you can activate your gateway.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ActivateGateway>`_
**Request Syntax**
::
response = client.activate_gateway(
ActivationKey='string',
GatewayName='string',
GatewayTimezone='string',
GatewayRegion='string',
GatewayType='string',
TapeDriveType='string',
MediumChangerType='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{
'GatewayARN': 'string'
}
**Response Structure**
- *(dict) --*
AWS Storage Gateway returns the Amazon Resource Name (ARN) of the activated gateway. It is a string made of information such as your account, gateway name, and region. This ARN is used to reference the gateway in other API operations as well as resource-based authorization.
.. note::
For gateways activated prior to September 02, 2015, the gateway ARN contains the gateway name rather than the gateway ID. Changing the name of the gateway has no effect on the gateway ARN.
- **GatewayARN** *(string) --*
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:type ActivationKey: string
:param ActivationKey: **[REQUIRED]**
Your gateway activation key. You can obtain the activation key by sending an HTTP GET request with redirects enabled to the gateway IP address (port 80). The redirect URL returned in the response provides you the activation key for your gateway in the query string parameter ``activationKey`` . It may also include other activation-related parameters, however, these are merely defaults -- the arguments you pass to the ``ActivateGateway`` API call determine the actual configuration of your gateway.
For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/get-activation-key.html in the Storage Gateway User Guide.
:type GatewayName: string
:param GatewayName: **[REQUIRED]**
The name you configured for your gateway.
:type GatewayTimezone: string
:param GatewayTimezone: **[REQUIRED]**
A value that indicates the time zone you want to set for the gateway. The time zone is of the format \"GMT-hr:mm\" or \"GMT+hr:mm\". For example, GMT-4:00 indicates the time is 4 hours behind GMT. GMT+2:00 indicates the time is 2 hours ahead of GMT. The time zone is used, for example, for scheduling snapshots and your gateway\'s maintenance schedule.
:type GatewayRegion: string
:param GatewayRegion: **[REQUIRED]**
A value that indicates the region where you want to store your data. The gateway region specified must be the same region as the region in your ``Host`` header in the request. For more information about available regions and endpoints for AWS Storage Gateway, see `Regions and Endpoints <https://docs.aws.amazon.com/general/latest/gr/rande.html#sg_region>`__ in the *Amazon Web Services Glossary* .
Valid Values: See `AWS Storage Gateway Regions and Endpoints <https://docs.aws.amazon.com/general/latest/gr/rande.html#sg_region>`__ in the AWS General Reference.
:type GatewayType: string
:param GatewayType:
A value that defines the type of gateway to activate. The type specified is critical to all later functions of the gateway and cannot be changed after activation. The default value is ``CACHED`` .
Valid Values: \"STORED\", \"CACHED\", \"VTL\", \"FILE_S3\"
:type TapeDriveType: string
:param TapeDriveType:
The value that indicates the type of tape drive to use for tape gateway. This field is optional.
Valid Values: \"IBM-ULT3580-TD5\"
:type MediumChangerType: string
:param MediumChangerType:
The value that indicates the type of medium changer to use for tape gateway. This field is optional.
Valid Values: \"STK-L700\", \"AWS-Gateway-VTL\"
:type Tags: list
:param Tags:
A list of up to 50 tags that can be assigned to the gateway. Each tag is a key-value pair.
.. note::
Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length of a tag\'s key is 128 characters, and the maximum length for a tag\'s value is 256.
- *(dict) --*
A key-value pair that helps you manage, filter, and search for your resource. Allowed characters: letters, white space, and numbers, representable in UTF-8, and the following characters: + - = . _ : /
- **Key** *(string) --* **[REQUIRED]**
Tag key (String). The key can\'t start with aws:.
- **Value** *(string) --* **[REQUIRED]**
Value of the tag key.
:rtype: dict
:returns:
"""
pass
def add_cache(self, GatewayARN: str, DiskIds: List) -> Dict:
"""
Configures one or more gateway local disks as cache for a gateway. This operation is only supported in the cached volume, tape and file gateway type (see `Storage Gateway Concepts <https://docs.aws.amazon.com/storagegateway/latest/userguide/StorageGatewayConcepts.html>`__ ).
In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to add cache, and one or more disk IDs that you want to configure as cache.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AddCache>`_
**Request Syntax**
::
response = client.add_cache(
GatewayARN='string',
DiskIds=[
'string',
]
)
**Response Syntax**
::
{
'GatewayARN': 'string'
}
**Response Structure**
- *(dict) --*
- **GatewayARN** *(string) --*
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:type GatewayARN: string
:param GatewayARN: **[REQUIRED]**
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:type DiskIds: list
:param DiskIds: **[REQUIRED]**
An array of strings that identify disks that are to be configured as working storage. Each string have a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.
- *(string) --*
:rtype: dict
:returns:
"""
pass
def add_tags_to_resource(self, ResourceARN: str, Tags: List) -> Dict:
"""
Adds one or more tags to the specified resource. You use tags to add metadata to resources, which you can use to categorize these resources. For example, you can categorize resources by purpose, owner, environment, or team. Each tag consists of a key and a value, which you define. You can add tags to the following AWS Storage Gateway resources:
* Storage gateways of all types
* Storage volumes
* Virtual tapes
* NFS and SMB file shares
You can create a maximum of 50 tags for each resource. Virtual tapes and storage volumes that are recovered to a new gateway maintain their tags.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AddTagsToResource>`_
**Request Syntax**
::
response = client.add_tags_to_resource(
ResourceARN='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{
'ResourceARN': 'string'
}
**Response Structure**
- *(dict) --*
AddTagsToResourceOutput
- **ResourceARN** *(string) --*
The Amazon Resource Name (ARN) of the resource you want to add tags to.
:type ResourceARN: string
:param ResourceARN: **[REQUIRED]**
The Amazon Resource Name (ARN) of the resource you want to add tags to.
:type Tags: list
:param Tags: **[REQUIRED]**
The key-value pair that represents the tag you want to add to the resource. The value can be an empty string.
.. note::
Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length of a tag\'s key is 128 characters, and the maximum length for a tag\'s value is 256.
- *(dict) --*
A key-value pair that helps you manage, filter, and search for your resource. Allowed characters: letters, white space, and numbers, representable in UTF-8, and the following characters: + - = . _ : /
- **Key** *(string) --* **[REQUIRED]**
Tag key (String). The key can\'t start with aws:.
- **Value** *(string) --* **[REQUIRED]**
Value of the tag key.
:rtype: dict
:returns:
"""
pass
def add_upload_buffer(self, GatewayARN: str, DiskIds: List) -> Dict:
"""
Configures one or more gateway local disks as upload buffer for a specified gateway. This operation is supported for the stored volume, cached volume and tape gateway types.
In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to add upload buffer, and one or more disk IDs that you want to configure as upload buffer.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AddUploadBuffer>`_
**Request Syntax**
::
response = client.add_upload_buffer(
GatewayARN='string',
DiskIds=[
'string',
]
)
**Response Syntax**
::
{
'GatewayARN': 'string'
}
**Response Structure**
- *(dict) --*
- **GatewayARN** *(string) --*
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:type GatewayARN: string
:param GatewayARN: **[REQUIRED]**
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:type DiskIds: list
:param DiskIds: **[REQUIRED]**
An array of strings that identify disks that are to be configured as working storage. Each string have a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.
- *(string) --*
:rtype: dict
:returns:
"""
pass
def add_working_storage(self, GatewayARN: str, DiskIds: List) -> Dict:
"""
Configures one or more gateway local disks as working storage for a gateway. This operation is only supported in the stored volume gateway type. This operation is deprecated in cached volume API version 20120630. Use AddUploadBuffer instead.
.. note::
Working storage is also referred to as upload buffer. You can also use the AddUploadBuffer operation to add upload buffer to a stored volume gateway.
In the request, you specify the gateway Amazon Resource Name (ARN) to which you want to add working storage, and one or more disk IDs that you want to configure as working storage.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AddWorkingStorage>`_
**Request Syntax**
::
response = client.add_working_storage(
GatewayARN='string',
DiskIds=[
'string',
]
)
**Response Syntax**
::
{
'GatewayARN': 'string'
}
**Response Structure**
- *(dict) --*
A JSON object containing the of the gateway for which working storage was configured.
- **GatewayARN** *(string) --*
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:type GatewayARN: string
:param GatewayARN: **[REQUIRED]**
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:type DiskIds: list
:param DiskIds: **[REQUIRED]**
An array of strings that identify disks that are to be configured as working storage. Each string have a minimum length of 1 and maximum length of 300. You can get the disk IDs from the ListLocalDisks API.
- *(string) --*
:rtype: dict
:returns:
"""
pass
def attach_volume(self, GatewayARN: str, VolumeARN: str, NetworkInterfaceId: str, TargetName: str = None, DiskId: str = None) -> Dict:
"""
Connects a volume to an iSCSI connection and then attaches the volume to the specified gateway. Detaching and attaching a volume enables you to recover your data from one gateway to a different gateway without creating a snapshot. It also makes it easier to move your volumes from an on-premises gateway to a gateway hosted on an Amazon EC2 instance.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AttachVolume>`_
**Request Syntax**
::
response = client.attach_volume(
GatewayARN='string',
TargetName='string',
VolumeARN='string',
NetworkInterfaceId='string',
DiskId='string'
)
**Response Syntax**
::
{
'VolumeARN': 'string',
'TargetARN': 'string'
}
**Response Structure**
- *(dict) --*
AttachVolumeOutput
- **VolumeARN** *(string) --*
The Amazon Resource Name (ARN) of the volume that was attached to the gateway.
- **TargetARN** *(string) --*
The Amazon Resource Name (ARN) of the volume target, which includes the iSCSI name for the initiator that was used to connect to the target.
:type GatewayARN: string
:param GatewayARN: **[REQUIRED]**
The Amazon Resource Name (ARN) of the gateway that you want to attach the volume to.
:type TargetName: string
:param TargetName:
The name of the iSCSI target used by an initiator to connect to a volume and used as a suffix for the target ARN. For example, specifying ``TargetName`` as *myvolume* results in the target ARN of ``arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume`` . The target name must be unique across all volumes on a gateway.
If you don\'t specify a value, Storage Gateway uses the value that was previously used for this volume as the new target name.
:type VolumeARN: string
:param VolumeARN: **[REQUIRED]**
The Amazon Resource Name (ARN) of the volume to attach to the specified gateway.
:type NetworkInterfaceId: string
:param NetworkInterfaceId: **[REQUIRED]**
The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.
Valid Values: A valid IP address.
:type DiskId: string
:param DiskId:
The unique device ID or other distinguishing data that identifies the local disk used to create the volume. This value is only required when you are attaching a stored volume.
:rtype: dict
:returns:
"""
pass
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
def cancel_archival(self, GatewayARN: str, TapeARN: str) -> Dict:
"""
Cancels archiving of a virtual tape to the virtual tape shelf (VTS) after the archiving process is initiated. This operation is only supported in the tape gateway type.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CancelArchival>`_
**Request Syntax**
::
response = client.cancel_archival(
GatewayARN='string',
TapeARN='string'
)
**Response Syntax**
::
{
'TapeARN': 'string'
}
**Response Structure**
- *(dict) --*
CancelArchivalOutput
- **TapeARN** *(string) --*
The Amazon Resource Name (ARN) of the virtual tape for which archiving was canceled.
:type GatewayARN: string
:param GatewayARN: **[REQUIRED]**
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:type TapeARN: string
:param TapeARN: **[REQUIRED]**
The Amazon Resource Name (ARN) of the virtual tape you want to cancel archiving for.
:rtype: dict
:returns:
"""
pass
def cancel_retrieval(self, GatewayARN: str, TapeARN: str) -> Dict:
"""
Cancels retrieval of a virtual tape from the virtual tape shelf (VTS) to a gateway after the retrieval process is initiated. The virtual tape is returned to the VTS. This operation is only supported in the tape gateway type.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CancelRetrieval>`_
**Request Syntax**
::
response = client.cancel_retrieval(
GatewayARN='string',
TapeARN='string'
)
**Response Syntax**
::
{
'TapeARN': 'string'
}
**Response Structure**
- *(dict) --*
CancelRetrievalOutput
- **TapeARN** *(string) --*
The Amazon Resource Name (ARN) of the virtual tape for which retrieval was canceled.
:type GatewayARN: string
:param GatewayARN: **[REQUIRED]**
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:type TapeARN: string
:param TapeARN: **[REQUIRED]**
The Amazon Resource Name (ARN) of the virtual tape you want to cancel retrieval for.
:rtype: dict
:returns:
"""
pass
def create_cached_iscsi_volume(self, GatewayARN: str, VolumeSizeInBytes: int, TargetName: str, NetworkInterfaceId: str, ClientToken: str, SnapshotId: str = None, SourceVolumeARN: str = None, KMSEncrypted: bool = None, KMSKey: str = None, Tags: List = None) -> Dict:
"""
Creates a cached volume on a specified cached volume gateway. This operation is only supported in the cached volume gateway type.
.. note::
Cache storage must be allocated to the gateway before you can create a cached volume. Use the AddCache operation to add cache storage to a gateway.
In the request, you must specify the gateway, size of the volume in bytes, the iSCSI target name, an IP address on which to expose the target, and a unique client token. In response, the gateway creates the volume and returns information about it. This information includes the volume Amazon Resource Name (ARN), its size, and the iSCSI target ARN that initiators can use to connect to the volume target.
Optionally, you can provide the ARN for an existing volume as the ``SourceVolumeARN`` for this cached volume, which creates an exact copy of the existing volume’s latest recovery point. The ``VolumeSizeInBytes`` value must be equal to or larger than the size of the copied volume, in bytes.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateCachediSCSIVolume>`_
**Request Syntax**
::
response = client.create_cached_iscsi_volume(
GatewayARN='string',
VolumeSizeInBytes=123,
SnapshotId='string',
TargetName='string',
SourceVolumeARN='string',
NetworkInterfaceId='string',
ClientToken='string',
KMSEncrypted=True|False,
KMSKey='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{
'VolumeARN': 'string',
'TargetARN': 'string'
}
**Response Structure**
- *(dict) --*
- **VolumeARN** *(string) --*
The Amazon Resource Name (ARN) of the configured volume.
- **TargetARN** *(string) --*
The Amazon Resource Name (ARN) of the volume target, which includes the iSCSI name that initiators can use to connect to the target.
:type GatewayARN: string
:param GatewayARN: **[REQUIRED]**
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:type VolumeSizeInBytes: integer
:param VolumeSizeInBytes: **[REQUIRED]**
The size of the volume in bytes.
:type SnapshotId: string
:param SnapshotId:
The snapshot ID (e.g. \"snap-1122aabb\") of the snapshot to restore as the new cached volume. Specify this field if you want to create the iSCSI storage volume from a snapshot otherwise do not include this field. To list snapshots for your account use `DescribeSnapshots <https://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSnapshots.html>`__ in the *Amazon Elastic Compute Cloud API Reference* .
:type TargetName: string
:param TargetName: **[REQUIRED]**
The name of the iSCSI target used by an initiator to connect to a volume and used as a suffix for the target ARN. For example, specifying ``TargetName`` as *myvolume* results in the target ARN of ``arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume`` . The target name must be unique across all volumes on a gateway.
If you don\'t specify a value, Storage Gateway uses the value that was previously used for this volume as the new target name.
:type SourceVolumeARN: string
:param SourceVolumeARN:
The ARN for an existing volume. Specifying this ARN makes the new volume into an exact copy of the specified existing volume\'s latest recovery point. The ``VolumeSizeInBytes`` value for this new volume must be equal to or larger than the size of the existing volume, in bytes.
:type NetworkInterfaceId: string
:param NetworkInterfaceId: **[REQUIRED]**
The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.
Valid Values: A valid IP address.
:type ClientToken: string
:param ClientToken: **[REQUIRED]**
A unique identifier that you use to retry a request. If you retry a request, use the same ``ClientToken`` you specified in the initial request.
:type KMSEncrypted: boolean
:param KMSEncrypted:
True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.
:type KMSKey: string
:param KMSKey:
The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.
:type Tags: list
:param Tags:
A list of up to 50 tags that can be assigned to a cached volume. Each tag is a key-value pair.
.. note::
Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length of a tag\'s key is 128 characters, and the maximum length for a tag\'s value is 256.
- *(dict) --*
A key-value pair that helps you manage, filter, and search for your resource. Allowed characters: letters, white space, and numbers, representable in UTF-8, and the following characters: + - = . _ : /
- **Key** *(string) --* **[REQUIRED]**
Tag key (String). The key can\'t start with aws:.
- **Value** *(string) --* **[REQUIRED]**
Value of the tag key.
:rtype: dict
:returns:
"""
pass
def create_nfs_file_share(self, ClientToken: str, GatewayARN: str, Role: str, LocationARN: str, NFSFileShareDefaults: Dict = None, KMSEncrypted: bool = None, KMSKey: str = None, DefaultStorageClass: str = None, ObjectACL: str = None, ClientList: List = None, Squash: str = None, ReadOnly: bool = None, GuessMIMETypeEnabled: bool = None, RequesterPays: bool = None, Tags: List = None) -> Dict:
    """
    Create a Network File System (NFS) file share on an existing file gateway.
    A file share is a file system mount point backed by Amazon S3 cloud
    storage, exposed over an NFS interface. Supported for file gateways only.

    .. warning::
        AWS Security Token Service (AWS STS) must be activated in the region
        where the file gateway is being created. File gateways do not support
        creating hard or symbolic links on a file share.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateNFSFileShare>`_

    :type ClientToken: string
    :param ClientToken: **[REQUIRED]** Unique string supplied by you and used
        by the file gateway to ensure idempotent file share creation.
    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** The Amazon Resource Name (ARN) of the
        file gateway on which to create the file share.
    :type Role: string
    :param Role: **[REQUIRED]** ARN of the AWS IAM role that the file gateway
        assumes when it accesses the underlying storage.
    :type LocationARN: string
    :param LocationARN: **[REQUIRED]** ARN of the backed storage used for
        storing file data.
    :type NFSFileShareDefaults: dict
    :param NFSFileShareDefaults: File share default values (``FileMode``,
        ``DirectoryMode``, ``GroupId``, ``OwnerId``). Optional.
    :type KMSEncrypted: boolean
    :param KMSEncrypted: True to use Amazon S3 server-side encryption with
        your own AWS KMS key, false to use a key managed by Amazon S3.
        Optional.
    :type KMSKey: string
    :param KMSKey: ARN of the AWS KMS key used for Amazon S3 server-side
        encryption. Can only be set when KMSEncrypted is true. Optional.
    :type DefaultStorageClass: string
    :param DefaultStorageClass: Default storage class for objects put into
        the S3 bucket: ``S3_STANDARD`` (default), ``S3_STANDARD_IA`` , or
        ``S3_ONEZONE_IA`` . Optional.
    :type ObjectACL: string
    :param ObjectACL: Access control list permission for objects the gateway
        puts into the S3 bucket. The default value is "private".
    :type ClientList: list
    :param ClientList: Clients allowed to access the file gateway; each entry
        must be a valid IP address or a valid CIDR block.
    :type Squash: string
    :param Squash: Maps users to the anonymous user: ``RootSquash`` (only
        root), ``NoSquash`` (no one), or ``AllSquash`` (everyone).
    :type ReadOnly: boolean
    :param ReadOnly: True if the write status of the file share is read-only,
        otherwise false.
    :type GuessMIMETypeEnabled: boolean
    :param GuessMIMETypeEnabled: True (the default) to guess the MIME type of
        uploaded objects from their file extensions.
    :type RequesterPays: boolean
    :param RequesterPays: True if the requester pays download costs instead of
        the S3 bucket owner. Must match the backing bucket's configuration.
    :type Tags: list
    :param Tags: Up to 50 key-value tags assigned to the NFS file share. Each
        tag is a dict with **[REQUIRED]** ``Key`` and ``Value`` strings; keys
        cannot start with aws:.
    :rtype: dict
    :returns: ``{'FileShareARN': 'string'}`` — the ARN of the newly created
        file share.
    """
    pass
def create_smb_file_share(self, ClientToken: str, GatewayARN: str, Role: str, LocationARN: str, KMSEncrypted: bool = None, KMSKey: str = None, DefaultStorageClass: str = None, ObjectACL: str = None, ReadOnly: bool = None, GuessMIMETypeEnabled: bool = None, RequesterPays: bool = None, SMBACLEnabled: bool = None, ValidUserList: List = None, InvalidUserList: List = None, Authentication: str = None, Tags: List = None) -> Dict:
    """
    Create a Server Message Block (SMB) file share on an existing file
    gateway. A file share is a file system mount point backed by Amazon S3
    cloud storage, exposed over an SMB interface. Supported for file gateways
    only.

    .. warning::
        AWS Security Token Service (AWS STS) must be activated in the AWS
        Region where the file gateway is being created. See `Activating and
        Deactivating AWS STS in an AWS Region
        <https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html>`__
        in the *AWS Identity and Access Management User Guide.* File gateways
        don't support creating hard or symbolic links on a file share.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateSMBFileShare>`_

    :type ClientToken: string
    :param ClientToken: **[REQUIRED]** Unique string supplied by you and used
        by the file gateway to ensure idempotent file share creation.
    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** The Amazon Resource Name (ARN) of the
        file gateway on which to create the file share.
    :type Role: string
    :param Role: **[REQUIRED]** ARN of the AWS IAM role that the file gateway
        assumes when it accesses the underlying storage.
    :type LocationARN: string
    :param LocationARN: **[REQUIRED]** ARN of the backed storage used for
        storing file data.
    :type KMSEncrypted: boolean
    :param KMSEncrypted: True to use Amazon S3 server-side encryption with
        your own AWS KMS key, false to use a key managed by Amazon S3.
        Optional.
    :type KMSKey: string
    :param KMSKey: ARN of the AWS KMS key used for Amazon S3 server-side
        encryption. Can only be set when KMSEncrypted is true. Optional.
    :type DefaultStorageClass: string
    :param DefaultStorageClass: Default storage class for objects put into
        the S3 bucket: ``S3_STANDARD`` (default), ``S3_STANDARD_IA`` , or
        ``S3_ONEZONE_IA`` . Optional.
    :type ObjectACL: string
    :param ObjectACL: Access control list permission for objects the gateway
        puts into the S3 bucket. The default value is "private".
    :type ReadOnly: boolean
    :param ReadOnly: True if the write status of the file share is read-only,
        otherwise false.
    :type GuessMIMETypeEnabled: boolean
    :param GuessMIMETypeEnabled: True (the default) to guess the MIME type of
        uploaded objects from their file extensions.
    :type RequesterPays: boolean
    :param RequesterPays: True if the requester pays download costs instead of
        the S3 bucket owner. Must match the backing bucket's configuration.
    :type SMBACLEnabled: boolean
    :param SMBACLEnabled: "true" to enable ACL (access control list) on the
        SMB file share, "false" to map file and directory permissions to the
        POSIX permissions.
    :type ValidUserList: list
    :param ValidUserList: Active Directory users or groups allowed to access
        the share; prefix groups with @ (for example ``@group1`` ). Can only
        be set when Authentication is ``ActiveDirectory`` .
    :type InvalidUserList: list
    :param InvalidUserList: Active Directory users or groups denied access to
        the share; prefix groups with @ (for example ``@group1`` ). Can only
        be set when Authentication is ``ActiveDirectory`` .
    :type Authentication: string
    :param Authentication: Authentication method for users of the share:
        ``ActiveDirectory`` (the default) or ``GuestAccess`` .
    :type Tags: list
    :param Tags: Up to 50 key-value tags assigned to the file share. Each
        tag is a dict with **[REQUIRED]** ``Key`` and ``Value`` strings; keys
        cannot start with aws:.
    :rtype: dict
    :returns: ``{'FileShareARN': 'string'}`` — the ARN of the newly created
        file share.
    """
    pass
def create_snapshot(self, VolumeARN: str, SnapshotDescription: str) -> Dict:
    """
    Initiate an ad hoc snapshot of a gateway volume.

    AWS Storage Gateway backs up point-in-time snapshots of your data to
    Amazon S3 for durable off-site recovery and for import into an Amazon EBS
    volume in EC2. Identify the volume by its ARN and supply a description;
    both appear in the AWS Storage Gateway console, and the returned snapshot
    ID can be used to track progress or later create a volume from the
    snapshot. Supported only for stored and cached volume gateway types. See
    `Editing a Snapshot Schedule
    <https://docs.aws.amazon.com/storagegateway/latest/userguide/managing-volumes.html#SchedulingSnapshot>`__ .

    .. note::
        Listing or deleting a snapshot requires the Amazon EC2 API
        (DescribeSnapshots / DeleteSnapshot); see the `EC2 API reference
        <https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Operations.html>`__ .

    .. warning::
        Volume and snapshot IDs are changing to a longer length ID format;
        see the important note on the `Welcome
        <https://docs.aws.amazon.com/storagegateway/latest/APIReference/Welcome.html>`__ page.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateSnapshot>`_

    :type VolumeARN: string
    :param VolumeARN: **[REQUIRED]** ARN of the volume. Use the ListVolumes
        operation to return a list of gateway volumes.
    :type SnapshotDescription: string
    :param SnapshotDescription: **[REQUIRED]** Textual description shown in
        the Amazon EC2 console (Elastic Block Store snapshots panel,
        **Description** field) and in the AWS Storage Gateway snapshot
        **Details** pane.
    :rtype: dict
    :returns: ``{'VolumeARN': 'string', 'SnapshotId': 'string'}`` — the
        volume's ARN and the ID of the snapshot that was taken.
    """
    pass
def create_snapshot_from_volume_recovery_point(self, VolumeARN: str, SnapshotDescription: str) -> Dict:
    """
    Initiate a snapshot of a gateway volume from a volume recovery point.
    Supported only for the cached volume gateway type.

    A volume recovery point is a point in time at which all of the volume's
    data is consistent and from which a snapshot can be created; use
    ListVolumeRecoveryPoints to enumerate them. Identify the volume by its
    ARN and supply a description; both appear in the AWS Storage Gateway
    console, and the returned snapshot ID can be used to track progress or
    later create a volume from the snapshot.

    .. note::
        Listing or deleting a snapshot requires the Amazon EC2 API; see the
        *Amazon Elastic Compute Cloud API Reference* .

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateSnapshotFromVolumeRecoveryPoint>`_

    :type VolumeARN: string
    :param VolumeARN: **[REQUIRED]** ARN of the iSCSI volume target. Use the
        DescribeStorediSCSIVolumes operation to retrieve the TargetARN for
        the specified VolumeARN.
    :type SnapshotDescription: string
    :param SnapshotDescription: **[REQUIRED]** Textual description shown in
        the Amazon EC2 console (Elastic Block Store snapshots panel,
        **Description** field) and in the AWS Storage Gateway snapshot
        **Details** pane.
    :rtype: dict
    :returns: ``{'SnapshotId': 'string', 'VolumeARN': 'string',
        'VolumeRecoveryPointTime': 'string'}`` — the snapshot ID, the volume
        ARN, and the time the volume was created from the recovery point.
    """
    pass
def create_stored_iscsi_volume(self, GatewayARN: str, DiskId: str, PreserveExistingData: bool, TargetName: str, NetworkInterfaceId: str, SnapshotId: str = None, KMSEncrypted: bool = None, KMSKey: str = None, Tags: List = None) -> Dict:
    """
    Create a volume on a specified gateway. Supported only for the stored
    volume gateway type.

    The volume size is inferred from the disk size. You may preserve existing
    data on the disk, create the volume from an existing snapshot, or create
    an empty volume (which erases any existing data on the disk). The
    response includes the volume ARN, its size, and the iSCSI target ARN that
    initiators use to connect.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateStorediSCSIVolume>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** ARN of the gateway. Use the
        ListGateways operation to return a list of gateways for your account
        and region.
    :type DiskId: string
    :param DiskId: **[REQUIRED]** Unique identifier of the gateway local
        disk configured as a stored volume. Use `ListLocalDisks
        <https://docs.aws.amazon.com/storagegateway/latest/userguide/API_ListLocalDisks.html>`__
        to list disk IDs for a gateway.
    :type PreserveExistingData: boolean
    :param PreserveExistingData: **[REQUIRED]** True to preserve the data on
        the local disk; false to create an empty volume. Valid Values: true,
        false.
    :type TargetName: string
    :param TargetName: **[REQUIRED]** Name of the iSCSI target used by an
        initiator to connect to the volume, used as the suffix of the target
        ARN (e.g. *myvolume* yields
        ``arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume`` ).
        Must be unique across all volumes on a gateway. If omitted, the value
        previously used for this volume is reused.
    :type NetworkInterfaceId: string
    :param NetworkInterfaceId: **[REQUIRED]** Network interface of the
        gateway on which to expose the iSCSI target; only IPv4 addresses are
        accepted. Use DescribeGatewayInformation to list available network
        interfaces. Valid Values: A valid IP address.
    :type SnapshotId: string
    :param SnapshotId: Snapshot ID (e.g. "snap-1122aabb") to restore as the
        new stored volume; omit to create the volume without a snapshot. To
        list snapshots for your account use `DescribeSnapshots
        <https://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSnapshots.html>`__
        in the *Amazon Elastic Compute Cloud API Reference* .
    :type KMSEncrypted: boolean
    :param KMSEncrypted: True to use Amazon S3 server-side encryption with
        your own AWS KMS key, false to use a key managed by Amazon S3.
        Optional.
    :type KMSKey: string
    :param KMSKey: ARN of the KMS key used for Amazon S3 server-side
        encryption. Can only be set when KMSEncrypted is true. Optional.
    :type Tags: list
    :param Tags: Up to 50 key-value tags assigned to the stored volume. Each
        tag is a dict with **[REQUIRED]** ``Key`` and ``Value`` strings; keys
        cannot start with aws:.
    :rtype: dict
    :returns: ``{'VolumeARN': 'string', 'VolumeSizeInBytes': 123,
        'TargetARN': 'string'}`` — the configured volume's ARN, its size in
        bytes, and the iSCSI target ARN.
    """
    pass
def create_tape_with_barcode(self, GatewayARN: str, TapeSizeInBytes: int, TapeBarcode: str, KMSEncrypted: bool = None, KMSKey: str = None, PoolId: str = None, Tags: List = None) -> Dict:
    """
    Create a virtual tape using your own barcode. Supported only for the
    tape gateway type. A barcode is unique and cannot be reused once it has
    been used on a tape, including tapes that have been deleted.

    .. note::
        Cache storage must be allocated to the gateway before a virtual tape
        can be created; use the AddCache operation to add cache storage.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateTapeWithBarcode>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** Unique ARN of the gateway to associate
        the virtual tape with. Use the ListGateways operation to return a
        list of gateways for your account and region.
    :type TapeSizeInBytes: integer
    :param TapeSizeInBytes: **[REQUIRED]** Size, in bytes, of the virtual
        tape to create. Must be aligned by gigabyte (1024*1024*1024 byte).
    :type TapeBarcode: string
    :param TapeBarcode: **[REQUIRED]** Barcode to assign to the tape.
        Barcodes cannot be reused, including barcodes of deleted tapes.
    :type KMSEncrypted: boolean
    :param KMSEncrypted: True to use Amazon S3 server-side encryption with
        your own AWS KMS key, false to use a key managed by Amazon S3.
        Optional.
    :type KMSKey: string
    :param KMSKey: ARN of the AWS KMS Key used for Amazon S3 server-side
        encryption. Can only be set when KMSEncrypted is true. Optional.
    :type PoolId: string
    :param PoolId: ID of the pool to add the tape to for archiving; ejecting
        the tape archives it directly into the pool's storage class (Glacier
        or Deep Archive). Valid values: "GLACIER", "DEEP_ARCHIVE".
    :type Tags: list
    :param Tags: Up to 50 key-value tags assigned to the tape. Each tag is a
        dict with **[REQUIRED]** ``Key`` and ``Value`` strings; keys cannot
        start with aws:.
    :rtype: dict
    :returns: ``{'TapeARN': 'string'}`` — unique ARN of the virtual tape
        that was created.
    """
    pass
def create_tapes(self, GatewayARN: str, TapeSizeInBytes: int, ClientToken: str, NumTapesToCreate: int, TapeBarcodePrefix: str, KMSEncrypted: bool = None, KMSKey: str = None, PoolId: str = None, Tags: List = None) -> Dict:
    """
    Create one or more virtual tapes. Supported only for the tape gateway
    type.

    .. note::
        Cache storage must be allocated to the gateway before virtual tapes
        can be created; use the AddCache operation to add cache storage.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateTapes>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** Unique ARN of the gateway to associate
        the virtual tapes with. Use the ListGateways operation to return a
        list of gateways for your account and region.
    :type TapeSizeInBytes: integer
    :param TapeSizeInBytes: **[REQUIRED]** Size, in bytes, of each virtual
        tape to create. Must be aligned by gigabyte (1024*1024*1024 byte).
    :type ClientToken: string
    :param ClientToken: **[REQUIRED]** Unique identifier used to retry a
        request; retries must reuse the ``ClientToken`` from the initial
        request. Using the same ``ClientToken`` prevents creating the tape
        multiple times.
    :type NumTapesToCreate: integer
    :param NumTapesToCreate: **[REQUIRED]** Number of virtual tapes to
        create.
    :type TapeBarcodePrefix: string
    :param TapeBarcodePrefix: **[REQUIRED]** Prefix appended to the barcode
        of each tape, making the barcode unique. Must be 1 to 4 uppercase
        letters from A to Z.
    :type KMSEncrypted: boolean
    :param KMSEncrypted: True to use Amazon S3 server-side encryption with
        your own AWS KMS key, false to use a key managed by Amazon S3.
        Optional.
    :type KMSKey: string
    :param KMSKey: ARN of the AWS KMS key used for Amazon S3 server-side
        encryption. Can only be set when KMSEncrypted is true. Optional.
    :type PoolId: string
    :param PoolId: ID of the pool to add the tapes to for archiving; ejecting
        a tape archives it directly into the pool's storage class (Glacier
        or Deep Archive). Valid values: "GLACIER", "DEEP_ARCHIVE".
    :type Tags: list
    :param Tags: Up to 50 key-value tags assigned to each tape. Each tag is a
        dict with **[REQUIRED]** ``Key`` and ``Value`` strings; keys cannot
        start with aws:.
    :rtype: dict
    :returns: ``{'TapeARNs': ['string', ...]}`` — unique ARNs of the virtual
        tapes that were created.
    """
    pass
def delete_bandwidth_rate_limit(self, GatewayARN: str, BandwidthType: str) -> Dict:
    """
    Delete the bandwidth rate limits of a gateway. Either the upload or the
    download limit may be deleted, or both; deleting only one leaves the
    other unchanged. The gateway to work with is identified by its Amazon
    Resource Name (ARN).

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteBandwidthRateLimit>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** ARN of the gateway. Use the
        ListGateways operation to return a list of gateways for your account
        and region.
    :type BandwidthType: string
    :param BandwidthType: **[REQUIRED]** Which bandwidth rate limit to
        delete. Valid Values: ``Upload`` , ``Download`` , ``All`` .
    :rtype: dict
    :returns: ``{'GatewayARN': 'string'}`` — ARN of the gateway whose
        bandwidth rate information was deleted.
    """
    pass
def delete_chap_credentials(self, TargetARN: str, InitiatorName: str) -> Dict:
    """Delete the CHAP credentials for one iSCSI target/initiator pair.

    Removes the Challenge-Handshake Authentication Protocol (CHAP)
    credentials associated with the given target and initiator.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteChapCredentials>`_

    :type TargetARN: string
    :param TargetARN: **[REQUIRED]**
        The ARN of the iSCSI volume target. Use the
        DescribeStorediSCSIVolumes operation to retrieve the TargetARN for
        a given VolumeARN.
    :type InitiatorName: string
    :param InitiatorName: **[REQUIRED]**
        The iSCSI initiator that connects to the target.
    :rtype: dict
    :returns: ``{'TargetARN': 'string', 'InitiatorName': 'string'}`` —
        echoes the target ARN and initiator whose credentials were deleted.
    """
    # Generated stub: the concrete implementation is supplied by botocore.
    pass
def delete_file_share(self, FileShareARN: str, ForceDelete: bool = None) -> Dict:
    """Delete a file share from a file gateway.

    Supported only for file gateways.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteFileShare>`_

    :type FileShareARN: string
    :param FileShareARN: **[REQUIRED]**
        The ARN of the file share to delete.
    :type ForceDelete: boolean
    :param ForceDelete:
        When true, the share is deleted immediately and any in-flight data
        uploads to AWS are aborted (the share enters the FORCE_DELETING
        status). When false or unset, deletion waits until all data has
        been uploaded to AWS.
    :rtype: dict
    :returns: ``{'FileShareARN': 'string'}`` — the ARN of the deleted
        file share.
    """
    # Generated stub: the concrete implementation is supplied by botocore.
    pass
def delete_gateway(self, GatewayARN: str) -> Dict:
    """Delete a gateway identified by its ARN.

    The operation removes the gateway from the service; it does not delete
    the gateway virtual machine (VM) from the host computer. A deleted
    gateway cannot be reactivated. Completed snapshots of the gateway's
    volumes are retained (and remain billable), while pending snapshots
    will not complete.

    .. warning::
        Software charges stop after deletion, but existing Amazon EBS
        snapshots persist and continue to be billed. Remove them via your
        Amazon EC2 subscription or the Amazon EC2 console. See the
        `AWS Storage Gateway Detail Page <http://aws.amazon.com/storagegateway>`__ .

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteGateway>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]**
        The ARN of the gateway. Use the ListGateways operation to list the
        gateways for your account and region.
    :rtype: dict
    :returns: ``{'GatewayARN': 'string'}`` — the ARN of the deleted gateway.
    """
    # Generated stub: the concrete implementation is supplied by botocore.
    pass
def delete_snapshot_schedule(self, VolumeARN: str) -> Dict:
    """Delete the snapshot schedule of a volume.

    Gateway volumes can be snapshotted on a schedule or ad hoc; this
    action removes the schedule for the volume identified by its ARN. See
    `Working with Snapshots <https://docs.aws.amazon.com/storagegateway/latest/userguide/WorkingWithSnapshots.html>`__ .
    Supported only in stored and cached volume gateway types.

    .. note::
        Listing or deleting an individual snapshot requires the Amazon EC2
        API; see the *Amazon Elastic Compute Cloud API Reference*.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteSnapshotSchedule>`_

    :type VolumeARN: string
    :param VolumeARN: **[REQUIRED]**
        The ARN of the volume whose snapshot schedule should be deleted.
    :rtype: dict
    :returns: ``{'VolumeARN': 'string'}`` — the volume whose snapshot
        schedule was deleted.
    """
    # Generated stub: the concrete implementation is supplied by botocore.
    pass
def delete_tape(self, GatewayARN: str, TapeARN: str) -> Dict:
    """Delete the specified virtual tape.

    Supported only in the tape gateway type.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteTape>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]**
        The unique ARN of the gateway the tape to delete is associated
        with. Use the ListGateways operation to list the gateways for your
        account and region.
    :type TapeARN: string
    :param TapeARN: **[REQUIRED]**
        The ARN of the virtual tape to delete.
    :rtype: dict
    :returns: ``{'TapeARN': 'string'}`` — the ARN of the deleted virtual
        tape.
    """
    # Generated stub: the concrete implementation is supplied by botocore.
    pass
def delete_tape_archive(self, TapeARN: str) -> Dict:
    """Delete a virtual tape from the virtual tape shelf (VTS).

    Supported only in the tape gateway type.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteTapeArchive>`_

    :type TapeARN: string
    :param TapeARN: **[REQUIRED]**
        The ARN of the virtual tape to delete from the VTS.
    :rtype: dict
    :returns: ``{'TapeARN': 'string'}`` — the ARN of the virtual tape
        removed from the VTS.
    """
    # Generated stub: the concrete implementation is supplied by botocore.
    pass
def delete_volume(self, VolumeARN: str) -> Dict:
    """Delete a storage volume previously created via CreateCachediSCSIVolume or CreateStorediSCSIVolume.

    Supported only in the cached volume and stored volume types. For
    stored volume gateways the local disk that backed the volume is not
    deleted and can be reused for another volume.

    Before deleting, ensure there are no iSCSI connections to the volume
    and no snapshot in progress; query snapshot status with the Amazon EC2
    `DescribeSnapshots <https://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSnapshots.html>`__
    API described in the *Amazon Elastic Compute Cloud API Reference*.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteVolume>`_

    :type VolumeARN: string
    :param VolumeARN: **[REQUIRED]**
        The ARN of the volume. Use the ListVolumes operation to list
        gateway volumes.
    :rtype: dict
    :returns: ``{'VolumeARN': 'string'}`` — the ARN of the deleted volume,
        identical to the ARN supplied in the request.
    """
    # Generated stub: the concrete implementation is supplied by botocore.
    pass
def describe_bandwidth_rate_limit(self, GatewayARN: str) -> Dict:
    """Return the bandwidth rate limits of a gateway.

    Limits are unset by default (no rate limiting in effect). A value is
    returned for a limit only when it is set; if neither limit is set,
    only the gateway ARN appears in the response body.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeBandwidthRateLimit>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]**
        The ARN of the gateway. Use the ListGateways operation to list the
        gateways for your account and region.
    :rtype: dict
    :returns: A dict of the form::

        {
            'GatewayARN': 'string',
            'AverageUploadRateLimitInBitsPerSec': 123,    # omitted if unset
            'AverageDownloadRateLimitInBitsPerSec': 123   # omitted if unset
        }
    """
    # Generated stub: the concrete implementation is supplied by botocore.
    pass
def describe_cache(self, GatewayARN: str) -> Dict:
    """Return information about a gateway's cache.

    Supported only in the cached volume, tape, and file gateway types.
    The response lists the disk IDs configured as cache along with the
    amount of cache allocated and used.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeCache>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]**
        The ARN of the gateway. Use the ListGateways operation to list the
        gateways for your account and region.
    :rtype: dict
    :returns: A dict of the form::

        {
            'GatewayARN': 'string',
            'DiskIds': ['string', ...],        # disk IDs (1-300 chars each; see ListLocalDisks)
            'CacheAllocatedInBytes': 123,      # bytes of cache allocated to the gateway
            'CacheUsedPercentage': 123.0,      # percent of cache storage used (gateway-cached setup)
            'CacheDirtyPercentage': 123.0,     # percent not yet persisted to AWS
            'CacheHitPercentage': 123.0,       # read ops served from cache
            'CacheMissPercentage': 123.0       # read ops not served from cache
        }

        Percentage samples are taken at the end of the reporting period.
    """
    # Generated stub: the concrete implementation is supplied by botocore.
    pass
def describe_cached_iscsi_volumes(self, VolumeARNs: List) -> Dict:
    """Describe the cached gateway volumes named in the request.

    Supported only in the cached volume gateway types. All requested
    volumes must belong to a single gateway; the response is sorted by
    volume Amazon Resource Name (ARN).

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeCachediSCSIVolumes>`_

    :type VolumeARNs: list
    :param VolumeARNs: **[REQUIRED]**
        Strings, each the ARN of a cached volume. All volumes must be from
        the same gateway. Use ListVolumes to obtain volume ARNs.
    :rtype: dict
    :returns: ``{'CachediSCSIVolumes': [...]}`` where each element is a
        dict describing one iSCSI cached volume::

            {
                'VolumeARN': 'string',
                'VolumeId': 'string',              # e.g. vol-AE4B946D
                'VolumeType': 'string',            # VolumeType enumeration value
                'VolumeStatus': 'string',          # VolumeStatus value
                'VolumeAttachmentStatus': 'string',# attached to / detached from a gateway
                'VolumeSizeInBytes': 123,          # volume capacity in bytes
                'VolumeProgress': 123.0,           # percent complete while restoring/bootstrapping; omitted otherwise
                'SourceSnapshotId': 'string',      # snapshot used to create the volume, if any (e.g. snap-78e22663)
                'VolumeiSCSIAttributes': {
                    'TargetARN': 'string',         # ARN of the volume target
                    'NetworkInterfaceId': 'string',
                    'NetworkInterfacePort': 123,   # port used for iSCSI targets
                    'LunNumber': 123,              # logical disk number
                    'ChapEnabled': True|False      # whether mutual CHAP is enabled
                },
                'CreatedDate': datetime(2015, 1, 1),  # absent for volumes created before March 28, 2017
                'VolumeUsedInBytes': 123,          # based on touched blocks, not compressed billing size;
                                                   # unavailable for pre-May 13, 2015 volumes until data is stored
                'KMSKey': 'string',                # AWS KMS key ARN for S3 SSE; only set when KMSEncrypted is true
                'TargetName': 'string'             # iSCSI target name suffix of the target ARN; unique per gateway.
                                                   # If unspecified, the previously used value is reused.
            }
    """
    # Generated stub: the concrete implementation is supplied by botocore.
    pass
def describe_chap_credentials(self, TargetARN: str) -> Dict:
    """Return CHAP credential information for a specified iSCSI target.

    One entry is returned for each target-initiator pair; an empty array
    is returned when no CHAP credentials are set.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeChapCredentials>`_

    :type TargetARN: string
    :param TargetARN: **[REQUIRED]**
        The ARN of the iSCSI volume target. Use the
        DescribeStorediSCSIVolumes operation to retrieve the TargetARN for
        a given VolumeARN.
    :rtype: dict
    :returns: ``{'ChapCredentials': [...]}`` where each element describes
        the CHAP information for one target-initiator pair::

            {
                'TargetARN': 'string',                     # ARN of the volume
                                                           # (50-500 lowercase letters, numbers, periods, hyphens)
                'SecretToAuthenticateInitiator': 'string', # secret the initiator (e.g. Windows client)
                                                           # must provide for mutual CHAP with the target
                'InitiatorName': 'string',                 # iSCSI initiator that connects to the target
                'SecretToAuthenticateTarget': 'string'     # secret the target must provide for mutual
                                                           # CHAP with the initiator
            }
    """
    # Generated stub: the concrete implementation is supplied by botocore.
    pass
def describe_gateway_information(self, GatewayARN: str) -> Dict:
    """Return metadata about a gateway.

    The metadata includes the gateway's name, network interfaces,
    configured time zone, and operating state (running or not). The
    gateway to describe is identified by its ARN.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeGatewayInformation>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]**
        The ARN of the gateway. Use the ListGateways operation to list the
        gateways for your account and region.
    :rtype: dict
    :returns: A dict of the form::

        {
            'GatewayARN': 'string',
            'GatewayId': 'string',          # activation-assigned ID, part of the gateway ARN
            'GatewayName': 'string',        # name configured for the gateway
            'GatewayTimezone': 'string',    # configured time zone
            'GatewayState': 'string',       # operating state
            'GatewayNetworkInterfaces': [
                {
                    'Ipv4Address': 'string',  # IPv4 address of the interface
                    'MacAddress': 'string',   # MAC address; currently unsupported, not returned
                    'Ipv6Address': 'string'   # IPv6 address; currently not supported
                },
            ],
            'GatewayType': 'string',
            'NextUpdateAvailabilityDate': 'string',  # gateway-local date of next available update;
                                                     # omitted when no update is available
            'LastSoftwareUpdate': 'string',          # date of last applied update; omitted if never updated
            'Ec2InstanceId': 'string',      # EC2 instance that launched the gateway
            'Ec2InstanceRegion': 'string',  # AWS Region of that EC2 instance
            'Tags': [                        # up to 50 tags, sorted alphabetically by key;
                                             # use ListTagsForResource to view more than 10
                {
                    'Key': 'string',        # tag key; can't start with aws:
                    'Value': 'string'       # tag value
                },
            ]
        }
    """
    # Generated stub: the concrete implementation is supplied by botocore.
    pass
def describe_maintenance_start_time(self, GatewayARN: str) -> Dict:
    """Return the gateway's weekly maintenance start time.

    The response includes the day and time of week, expressed in the
    gateway's own time zone.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeMaintenanceStartTime>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]**
        The ARN of the gateway. Use the ListGateways operation to list the
        gateways for your account and region.
    :rtype: dict
    :returns: A dict of the form::

        {
            'GatewayARN': 'string',
            'HourOfDay': 123,      # hour hh (0-23), gateway time zone
            'MinuteOfHour': 123,   # minute mm (0-59), gateway time zone
            'DayOfWeek': 123,      # 0 (Sunday) through 6 (Saturday), gateway time zone
            'DayOfMonth': 123,     # ordinal 1-28; only available for tape and volume gateways
            'Timezone': 'string'   # time zone the start time/day are expressed in
        }
    """
    # Generated stub: the concrete implementation is supplied by botocore.
    pass
def describe_nfs_file_shares(self, FileShareARNList: List) -> Dict:
    """Describe one or more Network File System (NFS) file shares.

    Supported only for file gateways.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeNFSFileShares>`_

    :type FileShareARNList: list
    :param FileShareARNList: **[REQUIRED]**
        The ARN of each file share to describe.
    :rtype: dict
    :returns: ``{'NFSFileShareInfoList': [...]}`` with one entry per
        requested share::

            {
                'NFSFileShareDefaults': {      # default Unix permissions assigned to S3 objects
                                               # discovered by the file gateway
                    'FileMode': 'string',      # file mode "nnnn"; default 0666
                    'DirectoryMode': 'string', # directory mode "nnnn"; default 0777
                    'GroupId': 123,            # default group ID; default nfsnobody
                    'OwnerId': 123             # default owner ID; default nfsnobody
                },
                'FileShareARN': 'string',
                'FileShareId': 'string',
                'FileShareStatus': 'string',   # CREATING | UPDATING | AVAILABLE | DELETING
                'GatewayARN': 'string',        # gateway ARN (see ListGateways)
                'KMSEncrypted': True|False,    # true: S3 SSE with your own KMS key; false: S3-managed key
                'KMSKey': 'string',            # KMS key ARN; only set when KMSEncrypted is true
                'Path': 'string',              # mount-point path used by the NFS client
                'Role': 'string',              # IAM role the gateway assumes for backend storage access
                'LocationARN': 'string',       # ARN of the backend storage holding file data
                'DefaultStorageClass': 'string',  # S3_STANDARD (default) | S3_STANDARD_IA | S3_ONEZONE_IA
                'ObjectACL': 'private'|'public-read'|'public-read-write'|'authenticated-read'|'bucket-owner-read'|'bucket-owner-full-control'|'aws-exec-read',
                'ClientList': ['string', ...], # allowed clients: IP addresses or CIDR blocks
                'Squash': 'string',            # RootSquash | NoSquash | AllSquash (anonymous-user mapping)
                'ReadOnly': True|False,        # true when write status is read-only
                'GuessMIMETypeEnabled': True|False,  # MIME-type guessing from file extension; default true
                'RequesterPays': True|False,   # true: requester pays download costs; owner always pays storage.
                                               # Must match the backing S3 bucket's configuration.
                'Tags': [                      # up to 50 tags sorted by key; see ListTagsForResource
                    {
                        'Key': 'string',       # tag key; can't start with aws:
                        'Value': 'string'      # tag value
                    },
                ]
            }
    """
    # Generated stub: the concrete implementation is supplied by botocore.
    pass
    def describe_smb_file_shares(self, FileShareARNList: List[str]) -> Dict:
        """
        Gets a description for one or more Server Message Block (SMB) file shares from a file gateway. This operation is only supported for file gateways.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeSMBFileShares>`_
        **Request Syntax**
        ::
          response = client.describe_smb_file_shares(
              FileShareARNList=[
                  'string',
              ]
          )
        **Response Syntax**
        ::
          {
              'SMBFileShareInfoList': [
                  {
                      'FileShareARN': 'string',
                      'FileShareId': 'string',
                      'FileShareStatus': 'string',
                      'GatewayARN': 'string',
                      'KMSEncrypted': True|False,
                      'KMSKey': 'string',
                      'Path': 'string',
                      'Role': 'string',
                      'LocationARN': 'string',
                      'DefaultStorageClass': 'string',
                      'ObjectACL': 'private'|'public-read'|'public-read-write'|'authenticated-read'|'bucket-owner-read'|'bucket-owner-full-control'|'aws-exec-read',
                      'ReadOnly': True|False,
                      'GuessMIMETypeEnabled': True|False,
                      'RequesterPays': True|False,
                      'SMBACLEnabled': True|False,
                      'ValidUserList': [
                          'string',
                      ],
                      'InvalidUserList': [
                          'string',
                      ],
                      'Authentication': 'string',
                      'Tags': [
                          {
                              'Key': 'string',
                              'Value': 'string'
                          },
                      ]
                  },
              ]
          }
        **Response Structure**
        - *(dict) --*
          DescribeSMBFileSharesOutput
          - **SMBFileShareInfoList** *(list) --*
            An array containing a description for each requested file share.
            - *(dict) --*
              The Windows file permissions and ownership information assigned, by default, to native S3 objects when file gateway discovers them in S3 buckets. This operation is only supported for file gateways.
              - **FileShareARN** *(string) --*
                The Amazon Resource Name (ARN) of the file share.
              - **FileShareId** *(string) --*
                The ID of the file share.
              - **FileShareStatus** *(string) --*
                The status of the file share. Possible values are ``CREATING`` , ``UPDATING`` , ``AVAILABLE`` and ``DELETING`` .
              - **GatewayARN** *(string) --*
                The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
              - **KMSEncrypted** *(boolean) --*
                True to use Amazon S3 server-side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.
              - **KMSKey** *(string) --*
                The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.
              - **Path** *(string) --*
                The file share path used by the SMB client to identify the mount point.
              - **Role** *(string) --*
                The ARN of the IAM role that file gateway assumes when it accesses the underlying storage.
              - **LocationARN** *(string) --*
                The ARN of the backend storage used for storing file data.
              - **DefaultStorageClass** *(string) --*
                The default storage class for objects put into an Amazon S3 bucket by the file gateway. Possible values are ``S3_STANDARD`` , ``S3_STANDARD_IA`` , or ``S3_ONEZONE_IA`` . If this field is not populated, the default value ``S3_STANDARD`` is used. Optional.
              - **ObjectACL** *(string) --*
                A value that sets the access control list permission for objects in the S3 bucket that a file gateway puts objects into. The default value is "private".
              - **ReadOnly** *(boolean) --*
                A value that sets the write status of a file share. This value is true if the write status is read-only, and otherwise false.
              - **GuessMIMETypeEnabled** *(boolean) --*
                A value that enables guessing of the MIME type for uploaded objects based on file extensions. Set this value to true to enable MIME type guessing, and otherwise to false. The default value is true.
              - **RequesterPays** *(boolean) --*
                A value that sets who pays the cost of the request and the cost associated with data download from the S3 bucket. If this value is set to true, the requester pays the costs. Otherwise the S3 bucket owner pays. However, the S3 bucket owner always pays the cost of storing data.
                .. note::
                  ``RequesterPays`` is a configuration for the S3 bucket that backs the file share, so make sure that the configuration on the file share is the same as the S3 bucket configuration.
              - **SMBACLEnabled** *(boolean) --*
                If this value is set to "true", indicates that ACL (access control list) is enabled on the SMB file share. If it is set to "false", it indicates that file and directory permissions are mapped to the POSIX permission.
              - **ValidUserList** *(list) --*
                A list of users or groups in the Active Directory that are allowed to access the file share. A group must be prefixed with the @ character. For example ``@group1`` . Can only be set if Authentication is set to ``ActiveDirectory`` .
                - *(string) --*
              - **InvalidUserList** *(list) --*
                A list of users or groups in the Active Directory that are not allowed to access the file share. A group must be prefixed with the @ character. For example ``@group1`` . Can only be set if Authentication is set to ``ActiveDirectory`` .
                - *(string) --*
              - **Authentication** *(string) --*
                The authentication method of the file share.
                Valid values are ``ActiveDirectory`` or ``GuestAccess`` . The default is ``ActiveDirectory`` .
              - **Tags** *(list) --*
                A list of up to 50 tags assigned to the SMB file share, sorted alphabetically by key name. Each tag is a key-value pair. For a gateway with more than 10 tags assigned, you can view all tags using the ``ListTagsForResource`` API operation.
                - *(dict) --*
                  A key-value pair that helps you manage, filter, and search for your resource. Allowed characters: letters, white space, and numbers, representable in UTF-8, and the following characters: + - = . _ : /
                  - **Key** *(string) --*
                    Tag key (String). The key can't start with aws:.
                  - **Value** *(string) --*
                    Value of the tag key.
        :type FileShareARNList: list
        :param FileShareARNList: **[REQUIRED]**
          An array containing the Amazon Resource Name (ARN) of each file share to be described.
          - *(string) --*
            The Amazon Resource Name (ARN) of the file share.
        :rtype: dict
        :returns:
        """
        pass
    def describe_smb_settings(self, GatewayARN: str) -> Dict:
        """
        Gets a description of a Server Message Block (SMB) file share settings from a file gateway. This operation is only supported for file gateways.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeSMBSettings>`_
        **Request Syntax**
        ::
          response = client.describe_smb_settings(
              GatewayARN='string'
          )
        **Response Syntax**
        ::
          {
              'GatewayARN': 'string',
              'DomainName': 'string',
              'SMBGuestPasswordSet': True|False
          }
        **Response Structure**
        - *(dict) --*
          - **GatewayARN** *(string) --*
            The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
          - **DomainName** *(string) --*
            The name of the domain that the gateway is joined to.
          - **SMBGuestPasswordSet** *(boolean) --*
            This value is true if a password for the guest user "smbguest" is set, and otherwise false.
        :type GatewayARN: string
        :param GatewayARN: **[REQUIRED]**
          The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
        :rtype: dict
        :returns:
        """
        pass
    def describe_snapshot_schedule(self, VolumeARN: str) -> Dict:
        """
        Describes the snapshot schedule for the specified gateway volume. The snapshot schedule information includes intervals at which snapshots are automatically initiated on the volume. This operation is only supported in the cached volume and stored volume types.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeSnapshotSchedule>`_
        **Request Syntax**
        ::
          response = client.describe_snapshot_schedule(
              VolumeARN='string'
          )
        **Response Syntax**
        ::
          {
              'VolumeARN': 'string',
              'StartAt': 123,
              'RecurrenceInHours': 123,
              'Description': 'string',
              'Timezone': 'string'
          }
        **Response Structure**
        - *(dict) --*
          - **VolumeARN** *(string) --*
            The Amazon Resource Name (ARN) of the volume that was specified in the request.
          - **StartAt** *(integer) --*
            The hour of the day at which the snapshot schedule begins, represented as *hh* , where *hh* is the hour (0 to 23). The hour of the day is in the time zone of the gateway.
          - **RecurrenceInHours** *(integer) --*
            The number of hours between snapshots.
          - **Description** *(string) --*
            The snapshot description.
          - **Timezone** *(string) --*
            A value that indicates the time zone of the gateway.
        :type VolumeARN: string
        :param VolumeARN: **[REQUIRED]**
          The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation to return a list of gateway volumes.
        :rtype: dict
        :returns:
        """
        pass
    def describe_stored_iscsi_volumes(self, VolumeARNs: List[str]) -> Dict:
        """
        Returns the description of the gateway volumes specified in the request. The list of gateway volumes in the request must be from one gateway. In the response Amazon Storage Gateway returns volume information sorted by volume ARNs. This operation is only supported in stored volume gateway type.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeStorediSCSIVolumes>`_
        **Request Syntax**
        ::
          response = client.describe_stored_iscsi_volumes(
              VolumeARNs=[
                  'string',
              ]
          )
        **Response Syntax**
        ::
          {
              'StorediSCSIVolumes': [
                  {
                      'VolumeARN': 'string',
                      'VolumeId': 'string',
                      'VolumeType': 'string',
                      'VolumeStatus': 'string',
                      'VolumeAttachmentStatus': 'string',
                      'VolumeSizeInBytes': 123,
                      'VolumeProgress': 123.0,
                      'VolumeDiskId': 'string',
                      'SourceSnapshotId': 'string',
                      'PreservedExistingData': True|False,
                      'VolumeiSCSIAttributes': {
                          'TargetARN': 'string',
                          'NetworkInterfaceId': 'string',
                          'NetworkInterfacePort': 123,
                          'LunNumber': 123,
                          'ChapEnabled': True|False
                      },
                      'CreatedDate': datetime(2015, 1, 1),
                      'VolumeUsedInBytes': 123,
                      'KMSKey': 'string',
                      'TargetName': 'string'
                  },
              ]
          }
        **Response Structure**
        - *(dict) --*
          - **StorediSCSIVolumes** *(list) --*
            Describes a single unit of output from DescribeStorediSCSIVolumes . The following fields are returned:
            * **ChapEnabled** : Indicates whether mutual CHAP is enabled for the iSCSI target.
            * **LunNumber** : The logical disk number.
            * **NetworkInterfaceId** : The network interface ID of the stored volume that initiator use to map the stored volume as an iSCSI target.
            * **NetworkInterfacePort** : The port used to communicate with iSCSI targets.
            * **PreservedExistingData** : Indicates if when the stored volume was created, existing data on the underlying local disk was preserved.
            * **SourceSnapshotId** : If the stored volume was created from a snapshot, this field contains the snapshot ID used, e.g. snap-1122aabb. Otherwise, this field is not included.
            * **StorediSCSIVolumes** : An array of StorediSCSIVolume objects where each object contains metadata about one stored volume.
            * **TargetARN** : The Amazon Resource Name (ARN) of the volume target.
            * **VolumeARN** : The Amazon Resource Name (ARN) of the stored volume.
            * **VolumeDiskId** : The disk ID of the local disk that was specified in the CreateStorediSCSIVolume operation.
            * **VolumeId** : The unique identifier of the storage volume, e.g. vol-1122AABB.
            * **VolumeiSCSIAttributes** : A VolumeiSCSIAttributes object that represents a collection of iSCSI attributes for one stored volume.
            * **VolumeProgress** : Represents the percentage complete if the volume is restoring or bootstrapping that represents the percent of data transferred. This field does not appear in the response if the stored volume is not restoring or bootstrapping.
            * **VolumeSizeInBytes** : The size of the volume in bytes.
            * **VolumeStatus** : One of the ``VolumeStatus`` values that indicates the state of the volume.
            * **VolumeType** : One of the enumeration values describing the type of the volume. Currently, only STORED volumes are supported.
            - *(dict) --*
              Describes an iSCSI stored volume.
              - **VolumeARN** *(string) --*
                The Amazon Resource Name (ARN) of the storage volume.
              - **VolumeId** *(string) --*
                The unique identifier of the volume, e.g. vol-AE4B946D.
              - **VolumeType** *(string) --*
                One of the VolumeType enumeration values describing the type of the volume.
              - **VolumeStatus** *(string) --*
                One of the VolumeStatus values that indicates the state of the storage volume.
              - **VolumeAttachmentStatus** *(string) --*
                A value that indicates whether a storage volume is attached to, detached from, or is in the process of detaching from a gateway. For more information, see `Moving Your Volumes to a Different Gateway <https://docs.aws.amazon.com/storagegateway/latest/userguide/managing-volumes.html#attach-detach-volume>`__ .
              - **VolumeSizeInBytes** *(integer) --*
                The size of the volume in bytes.
              - **VolumeProgress** *(float) --*
                Represents the percentage complete if the volume is restoring or bootstrapping that represents the percent of data transferred. This field does not appear in the response if the stored volume is not restoring or bootstrapping.
              - **VolumeDiskId** *(string) --*
                The ID of the local disk that was specified in the CreateStorediSCSIVolume operation.
              - **SourceSnapshotId** *(string) --*
                If the stored volume was created from a snapshot, this field contains the snapshot ID used, e.g. snap-78e22663. Otherwise, this field is not included.
              - **PreservedExistingData** *(boolean) --*
                Indicates if when the stored volume was created, existing data on the underlying local disk was preserved.
                Valid Values: true, false
              - **VolumeiSCSIAttributes** *(dict) --*
                A VolumeiSCSIAttributes object that represents a collection of iSCSI attributes for one stored volume.
                - **TargetARN** *(string) --*
                  The Amazon Resource Name (ARN) of the volume target.
                - **NetworkInterfaceId** *(string) --*
                  The network interface identifier.
                - **NetworkInterfacePort** *(integer) --*
                  The port used to communicate with iSCSI targets.
                - **LunNumber** *(integer) --*
                  The logical disk number.
                - **ChapEnabled** *(boolean) --*
                  Indicates whether mutual CHAP is enabled for the iSCSI target.
              - **CreatedDate** *(datetime) --*
                The date the volume was created. Volumes created prior to March 28, 2017 don’t have this time stamp.
              - **VolumeUsedInBytes** *(integer) --*
                The size of the data stored on the volume in bytes. This value is calculated based on the number of blocks that are touched, instead of the actual amount of data written. This value can be useful for sequential write patterns but less accurate for random write patterns. ``VolumeUsedInBytes`` is different from the compressed size of the volume, which is the value that is used to calculate your bill.
                .. note::
                  This value is not available for volumes created prior to May 13, 2015, until you store data on the volume.
              - **KMSKey** *(string) --*
                The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.
              - **TargetName** *(string) --*
                The name of the iSCSI target used by an initiator to connect to a volume and used as a suffix for the target ARN. For example, specifying ``TargetName`` as *myvolume* results in the target ARN of ``arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume`` . The target name must be unique across all volumes on a gateway.
                If you don't specify a value, Storage Gateway uses the value that was previously used for this volume as the new target name.
        :type VolumeARNs: list
        :param VolumeARNs: **[REQUIRED]**
          An array of strings where each string represents the Amazon Resource Name (ARN) of a stored volume. All of the specified stored volumes must be from the same gateway. Use ListVolumes to get volume ARNs for a gateway.
          - *(string) --*
        :rtype: dict
        :returns:
        """
        pass
    def describe_tape_archives(self, TapeARNs: List[str] = None, Marker: str = None, Limit: int = None) -> Dict:
        """
        Returns a description of specified virtual tapes in the virtual tape shelf (VTS). This operation is only supported in the tape gateway type.
        If a specific ``TapeARN`` is not specified, AWS Storage Gateway returns a description of all virtual tapes found in the VTS associated with your account.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeTapeArchives>`_
        **Request Syntax**
        ::
          response = client.describe_tape_archives(
              TapeARNs=[
                  'string',
              ],
              Marker='string',
              Limit=123
          )
        **Response Syntax**
        ::
          {
              'TapeArchives': [
                  {
                      'TapeARN': 'string',
                      'TapeBarcode': 'string',
                      'TapeCreatedDate': datetime(2015, 1, 1),
                      'TapeSizeInBytes': 123,
                      'CompletionTime': datetime(2015, 1, 1),
                      'RetrievedTo': 'string',
                      'TapeStatus': 'string',
                      'TapeUsedInBytes': 123,
                      'KMSKey': 'string',
                      'PoolId': 'string'
                  },
              ],
              'Marker': 'string'
          }
        **Response Structure**
        - *(dict) --*
          DescribeTapeArchivesOutput
          - **TapeArchives** *(list) --*
            An array of virtual tape objects in the virtual tape shelf (VTS). The description includes the Amazon Resource Name (ARN) of the virtual tapes. The information returned includes the Amazon Resource Names (ARNs) of the tapes, size of the tapes, status of the tapes, progress of the description and tape barcode.
            - *(dict) --*
              Represents a virtual tape that is archived in the virtual tape shelf (VTS).
              - **TapeARN** *(string) --*
                The Amazon Resource Name (ARN) of an archived virtual tape.
              - **TapeBarcode** *(string) --*
                The barcode that identifies the archived virtual tape.
              - **TapeCreatedDate** *(datetime) --*
                The date the virtual tape was created.
              - **TapeSizeInBytes** *(integer) --*
                The size, in bytes, of the archived virtual tape.
              - **CompletionTime** *(datetime) --*
                The time that the archiving of the virtual tape was completed.
                The default time stamp format is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' format.
              - **RetrievedTo** *(string) --*
                The Amazon Resource Name (ARN) of the tape gateway that the virtual tape is being retrieved to.
                The virtual tape is retrieved from the virtual tape shelf (VTS).
              - **TapeStatus** *(string) --*
                The current state of the archived virtual tape.
              - **TapeUsedInBytes** *(integer) --*
                The size, in bytes, of data stored on the virtual tape.
                .. note::
                  This value is not available for tapes created prior to May 13, 2015.
              - **KMSKey** *(string) --*
                The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.
              - **PoolId** *(string) --*
                The ID of the pool that was used to archive the tape. The tapes in this pool are archived in the S3 storage class that is associated with the pool.
                Valid values: "GLACIER", "DEEP_ARCHIVE"
          - **Marker** *(string) --*
            An opaque string that indicates the position at which the virtual tapes that were fetched for description ended. Use this marker in your next request to fetch the next set of virtual tapes in the virtual tape shelf (VTS). If there are no more virtual tapes to describe, this field does not appear in the response.
        :type TapeARNs: list
        :param TapeARNs:
          Specifies one or more unique Amazon Resource Names (ARNs) that represent the virtual tapes you want to describe.
          - *(string) --*
        :type Marker: string
        :param Marker:
          An opaque string that indicates the position at which to begin describing virtual tapes.
        :type Limit: integer
        :param Limit:
          Specifies that the number of virtual tapes described be limited to the specified number.
        :rtype: dict
        :returns:
        """
        pass
    def describe_tape_recovery_points(self, GatewayARN: str, Marker: str = None, Limit: int = None) -> Dict:
        """
        Returns a list of virtual tape recovery points that are available for the specified tape gateway.
        A recovery point is a point-in-time view of a virtual tape at which all the data on the virtual tape is consistent. If your gateway crashes, virtual tapes that have recovery points can be recovered to a new gateway. This operation is only supported in the tape gateway type.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeTapeRecoveryPoints>`_
        **Request Syntax**
        ::
          response = client.describe_tape_recovery_points(
              GatewayARN='string',
              Marker='string',
              Limit=123
          )
        **Response Syntax**
        ::
          {
              'GatewayARN': 'string',
              'TapeRecoveryPointInfos': [
                  {
                      'TapeARN': 'string',
                      'TapeRecoveryPointTime': datetime(2015, 1, 1),
                      'TapeSizeInBytes': 123,
                      'TapeStatus': 'string'
                  },
              ],
              'Marker': 'string'
          }
        **Response Structure**
        - *(dict) --*
          DescribeTapeRecoveryPointsOutput
          - **GatewayARN** *(string) --*
            The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
          - **TapeRecoveryPointInfos** *(list) --*
            An array of TapeRecoveryPointInfos that are available for the specified gateway.
            - *(dict) --*
              Describes a recovery point.
              - **TapeARN** *(string) --*
                The Amazon Resource Name (ARN) of the virtual tape.
              - **TapeRecoveryPointTime** *(datetime) --*
                The time when the point-in-time view of the virtual tape was replicated for later recovery.
                The default time stamp format of the tape recovery point time is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' format.
              - **TapeSizeInBytes** *(integer) --*
                The size, in bytes, of the virtual tapes to recover.
              - **TapeStatus** *(string) --*
                The status of the virtual tapes.
          - **Marker** *(string) --*
            An opaque string that indicates the position at which the virtual tape recovery points that were listed for description ended.
            Use this marker in your next request to list the next set of virtual tape recovery points in the list. If there are no more recovery points to describe, this field does not appear in the response.
        :type GatewayARN: string
        :param GatewayARN: **[REQUIRED]**
          The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
        :type Marker: string
        :param Marker:
          An opaque string that indicates the position at which to begin describing the virtual tape recovery points.
        :type Limit: integer
        :param Limit:
          Specifies that the number of virtual tape recovery points that are described be limited to the specified number.
        :rtype: dict
        :returns:
        """
        # NOTE(review): documentation stub — the empty body appears intentional
        # (the real request dispatch presumably happens elsewhere; confirm).
        pass
    def describe_tapes(self, GatewayARN: str, TapeARNs: List[str] = None, Marker: str = None, Limit: int = None) -> Dict:
        """
        Returns a description of the specified Amazon Resource Name (ARN) of virtual tapes. If a ``TapeARN`` is not specified, returns a description of all virtual tapes associated with the specified gateway. This operation is only supported in the tape gateway type.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeTapes>`_
        **Request Syntax**
        ::
          response = client.describe_tapes(
              GatewayARN='string',
              TapeARNs=[
                  'string',
              ],
              Marker='string',
              Limit=123
          )
        **Response Syntax**
        ::
          {
              'Tapes': [
                  {
                      'TapeARN': 'string',
                      'TapeBarcode': 'string',
                      'TapeCreatedDate': datetime(2015, 1, 1),
                      'TapeSizeInBytes': 123,
                      'TapeStatus': 'string',
                      'VTLDevice': 'string',
                      'Progress': 123.0,
                      'TapeUsedInBytes': 123,
                      'KMSKey': 'string',
                      'PoolId': 'string'
                  },
              ],
              'Marker': 'string'
          }
        **Response Structure**
        - *(dict) --*
          DescribeTapesOutput
          - **Tapes** *(list) --*
            An array of virtual tape descriptions.
            - *(dict) --*
              Describes a virtual tape object.
              - **TapeARN** *(string) --*
                The Amazon Resource Name (ARN) of the virtual tape.
              - **TapeBarcode** *(string) --*
                The barcode that identifies a specific virtual tape.
              - **TapeCreatedDate** *(datetime) --*
                The date the virtual tape was created.
              - **TapeSizeInBytes** *(integer) --*
                The size, in bytes, of the virtual tape capacity.
              - **TapeStatus** *(string) --*
                The current state of the virtual tape.
              - **VTLDevice** *(string) --*
                The virtual tape library (VTL) device that the virtual tape is associated with.
              - **Progress** *(float) --*
                For archiving virtual tapes, indicates how much data remains to be uploaded before archiving is complete.
                Range: 0 (not started) to 100 (complete).
              - **TapeUsedInBytes** *(integer) --*
                The size, in bytes, of data stored on the virtual tape.
                .. note::
                  This value is not available for tapes created prior to May 13, 2015.
              - **KMSKey** *(string) --*
                The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.
              - **PoolId** *(string) --*
                The ID of the pool that contains tapes that will be archived. The tapes in this pool are archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (Glacier or Deep Archive) that corresponds to the pool.
                Valid values: "GLACIER", "DEEP_ARCHIVE"
          - **Marker** *(string) --*
            An opaque string which can be used as part of a subsequent DescribeTapes call to retrieve the next page of results.
            If a response does not contain a marker, then there are no more results to be retrieved.
        :type GatewayARN: string
        :param GatewayARN: **[REQUIRED]**
          The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
        :type TapeARNs: list
        :param TapeARNs:
          Specifies one or more unique Amazon Resource Names (ARNs) that represent the virtual tapes you want to describe. If this parameter is not specified, Tape gateway returns a description of all virtual tapes associated with the specified gateway.
          - *(string) --*
        :type Marker: string
        :param Marker:
          A marker value, obtained in a previous call to ``DescribeTapes`` . This marker indicates which page of results to retrieve.
          If not specified, the first page of results is retrieved.
        :type Limit: integer
        :param Limit:
          Specifies that the number of virtual tapes described be limited to the specified number.
          .. note::
            Amazon Web Services may impose its own limit, if this field is not set.
        :rtype: dict
        :returns:
        """
        pass
    def describe_upload_buffer(self, GatewayARN: str) -> Dict:
        """
        Returns information about the upload buffer of a gateway. This operation is supported for the stored volume, cached volume and tape gateway types.
        The response includes disk IDs that are configured as upload buffer space, and it includes the amount of upload buffer space allocated and used.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeUploadBuffer>`_
        **Request Syntax**
        ::
          response = client.describe_upload_buffer(
              GatewayARN='string'
          )
        **Response Syntax**
        ::
          {
              'GatewayARN': 'string',
              'DiskIds': [
                  'string',
              ],
              'UploadBufferUsedInBytes': 123,
              'UploadBufferAllocatedInBytes': 123
          }
        **Response Structure**
        - *(dict) --*
          - **GatewayARN** *(string) --*
            The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
          - **DiskIds** *(list) --*
            An array of the gateway's local disk IDs that are configured as working storage. Each local disk ID is specified as a string (minimum length of 1 and maximum length of 300). If no local disks are configured as working storage, then the DiskIds array is empty.
            - *(string) --*
          - **UploadBufferUsedInBytes** *(integer) --*
            The total number of bytes being used in the gateway's upload buffer.
          - **UploadBufferAllocatedInBytes** *(integer) --*
            The total number of bytes allocated in the gateway as upload buffer.
        :type GatewayARN: string
        :param GatewayARN: **[REQUIRED]**
          The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
        :rtype: dict
        :returns:
        """
        pass
    def describe_vtl_devices(self, GatewayARN: str, VTLDeviceARNs: List[str] = None, Marker: str = None, Limit: int = None) -> Dict:
        """
        Returns a description of virtual tape library (VTL) devices for the specified tape gateway. In the response, AWS Storage Gateway returns VTL device information.
        This operation is only supported in the tape gateway type.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeVTLDevices>`_
        **Request Syntax**
        ::
          response = client.describe_vtl_devices(
              GatewayARN='string',
              VTLDeviceARNs=[
                  'string',
              ],
              Marker='string',
              Limit=123
          )
        **Response Syntax**
        ::
          {
              'GatewayARN': 'string',
              'VTLDevices': [
                  {
                      'VTLDeviceARN': 'string',
                      'VTLDeviceType': 'string',
                      'VTLDeviceVendor': 'string',
                      'VTLDeviceProductIdentifier': 'string',
                      'DeviceiSCSIAttributes': {
                          'TargetARN': 'string',
                          'NetworkInterfaceId': 'string',
                          'NetworkInterfacePort': 123,
                          'ChapEnabled': True|False
                      }
                  },
              ],
              'Marker': 'string'
          }
        **Response Structure**
        - *(dict) --*
          DescribeVTLDevicesOutput
          - **GatewayARN** *(string) --*
            The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
          - **VTLDevices** *(list) --*
            An array of VTL device objects composed of the Amazon Resource Name (ARN) of the VTL devices.
            - *(dict) --*
              Represents a device object associated with a tape gateway.
              - **VTLDeviceARN** *(string) --*
                Specifies the unique Amazon Resource Name (ARN) of the device (tape drive or media changer).
              - **VTLDeviceType** *(string) --*
                Specifies the type of device that the VTL device emulates.
              - **VTLDeviceVendor** *(string) --*
                Specifies the vendor of the device that the VTL device object emulates.
              - **VTLDeviceProductIdentifier** *(string) --*
                Specifies the model number of device that the VTL device emulates.
              - **DeviceiSCSIAttributes** *(dict) --*
                A list of iSCSI information about a VTL device.
                - **TargetARN** *(string) --*
                  Specifies the unique Amazon Resource Name (ARN) that encodes the iSCSI qualified name (iqn) of a tape drive or media changer target.
                - **NetworkInterfaceId** *(string) --*
                  The network interface identifier of the VTL device.
                - **NetworkInterfacePort** *(integer) --*
                  The port used to communicate with iSCSI VTL device targets.
                - **ChapEnabled** *(boolean) --*
                  Indicates whether mutual CHAP is enabled for the iSCSI target.
          - **Marker** *(string) --*
            An opaque string that indicates the position at which the VTL devices that were fetched for description ended. Use the marker in your next request to fetch the next set of VTL devices in the list. If there are no more VTL devices to describe, this field does not appear in the response.
        :type GatewayARN: string
        :param GatewayARN: **[REQUIRED]**
          The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
        :type VTLDeviceARNs: list
        :param VTLDeviceARNs:
          An array of strings, where each string represents the Amazon Resource Name (ARN) of a VTL device.
          .. note::
            All of the specified VTL devices must be from the same gateway. If no VTL devices are specified, the result will contain all devices on the specified gateway.
          - *(string) --*
        :type Marker: string
        :param Marker:
          An opaque string that indicates the position at which to begin describing the VTL devices.
        :type Limit: integer
        :param Limit:
          Specifies that the number of VTL devices described be limited to the specified number.
        :rtype: dict
        :returns:
        """
        pass
    def describe_working_storage(self, GatewayARN: str) -> Dict:
        """
        Returns information about the working storage of a gateway. This operation is only supported in the stored volumes gateway type. This operation is deprecated in cached volumes API version (20120630). Use DescribeUploadBuffer instead.
        .. note::
          Working storage is also referred to as upload buffer. You can also use the DescribeUploadBuffer operation to add upload buffer to a stored volume gateway.
        The response includes disk IDs that are configured as working storage, and it includes the amount of working storage allocated and used.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeWorkingStorage>`_
        **Request Syntax**
        ::
          response = client.describe_working_storage(
              GatewayARN='string'
          )
        **Response Syntax**
        ::
          {
              'GatewayARN': 'string',
              'DiskIds': [
                  'string',
              ],
              'WorkingStorageUsedInBytes': 123,
              'WorkingStorageAllocatedInBytes': 123
          }
        **Response Structure**
        - *(dict) --*
          A JSON object containing the following fields:
          - **GatewayARN** *(string) --*
            The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
          - **DiskIds** *(list) --*
            An array of the gateway's local disk IDs that are configured as working storage. Each local disk ID is specified as a string (minimum length of 1 and maximum length of 300). If no local disks are configured as working storage, then the DiskIds array is empty.
            - *(string) --*
          - **WorkingStorageUsedInBytes** *(integer) --*
            The total working storage in bytes in use by the gateway. If no working storage is configured for the gateway, this field returns 0.
          - **WorkingStorageAllocatedInBytes** *(integer) --*
            The total working storage in bytes allocated for the gateway. If no working storage is configured for the gateway, this field returns 0.
        :type GatewayARN: string
        :param GatewayARN: **[REQUIRED]**
          The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
        :rtype: dict
        :returns:
        """
        # NOTE(review): documentation stub — the empty body appears intentional
        # (the real request dispatch presumably happens elsewhere; confirm).
        pass
def detach_volume(self, VolumeARN: str, ForceDetach: bool = None) -> Dict:
    """
    Disconnects a volume from an iSCSI connection and then detaches the volume from the specified gateway. Detaching and attaching a volume enables you to recover your data from one gateway to a different gateway without creating a snapshot. It also makes it easier to move your volumes from an on-premises gateway to a gateway hosted on an Amazon EC2 instance.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DetachVolume>`_
    **Request Syntax**
    ::
    response = client.detach_volume(
    VolumeARN='string',
    ForceDetach=True|False
    )
    **Response Syntax**
    ::
    {
    'VolumeARN': 'string'
    }
    **Response Structure**
    - *(dict) --*
    DetachVolumeOutput
    - **VolumeARN** *(string) --*
    The Amazon Resource Name (ARN) of the volume that was detached.
    :type VolumeARN: string
    :param VolumeARN: **[REQUIRED]**
    The Amazon Resource Name (ARN) of the volume to detach from the gateway.
    :type ForceDetach: boolean
    :param ForceDetach:
    Set to ``true`` to forcibly remove the iSCSI connection of the target volume and detach the volume. The default is ``false`` . If this value is set to ``false`` , you must manually disconnect the iSCSI connection from the target volume.
    :rtype: dict
    :returns:
    """
    pass
def disable_gateway(self, GatewayARN: str) -> Dict:
    """
    Disable a tape gateway that is no longer functioning (for example, when
    the gateway VM is damaged) so that its virtual tapes can be recovered.
    Use this for a tape gateway that is unreachable or not working; it is
    supported only for the tape gateway type.

    .. warning::
        Once a gateway is disabled it cannot be enabled again.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DisableGateway>`_

    The response (DisableGatewayOutput) is a dict with one key,
    ``GatewayARN`` -- the unique ARN of the disabled gateway.

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** The Amazon Resource Name (ARN) of the
        gateway. Use the ListGateways operation to return a list of gateways
        for your account and region.
    :rtype: dict
    :returns:
    """
    pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
    """
    Build a presigned URL for the given client method and its arguments.

    :type ClientMethod: string
    :param ClientMethod: Name of the client method to presign for.
    :type Params: dict
    :param Params: The parameters that would normally be passed to
        ``ClientMethod``.
    :type ExpiresIn: int
    :param ExpiresIn: How long, in seconds, the presigned URL remains valid.
        Defaults to one hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: HTTP method to use on the generated URL. By default,
        whatever method the operation's model uses.
    :returns: The presigned URL.
    """
    pass
def get_paginator(self, operation_name: str = None) -> Paginator:
    """
    Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: Name of the operation, identical to the client
        method name. For example, if the method is ``create_foo`` (normally
        invoked as ``client.create_foo(**kwargs)``) and that operation can be
        paginated, call ``client.get_paginator("create_foo")``.
    :raise OperationNotPageableError: The operation is not pageable; the
        ``client.can_paginate`` method can be used to check this in advance.
    :rtype: L{botocore.paginate.Paginator}
    :return: A paginator object.
    """
    pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
    """
    Return an object that waits for some condition to be reached.

    :type waiter_name: str
    :param waiter_name: Name of the waiter to get. The waiters section of
        the service documentation lists the available waiters.
    :returns: The specified waiter object.
    :rtype: botocore.waiter.Waiter
    """
    pass
def join_domain(self, GatewayARN: str, DomainName: str, UserName: str, Password: str, OrganizationalUnit: str = None, DomainControllers: List = None) -> Dict:
    """
    Add a file gateway to an Active Directory domain. Supported only for
    file gateways that support the SMB file protocol.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/JoinDomain>`_

    The response (JoinDomainOutput) is a dict with one key, ``GatewayARN``
    -- the unique ARN of the gateway that joined the domain.

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** The Amazon Resource Name (ARN) of the
        gateway. Use the ``ListGateways`` operation to return a list of
        gateways for your account and region.
    :type DomainName: string
    :param DomainName: **[REQUIRED]** The name of the domain that you want
        the gateway to join.
    :type OrganizationalUnit: string
    :param OrganizationalUnit: The organizational unit (OU) -- a container
        within an Active Directory that can hold users, groups, computers,
        and other OUs -- that the gateway will join within the AD domain.
    :type DomainControllers: list
    :param DomainControllers: List of IPv4 addresses, NetBIOS names, or host
        names of your domain server. If you need to specify the port number,
        include it after the colon (“:”). For example,
        ``mydc.mydomain.com:389`` .
        - *(string) --*
    :type UserName: string
    :param UserName: **[REQUIRED]** The user name of a user who has
        permission to add the gateway to the Active Directory domain.
    :type Password: string
    :param Password: **[REQUIRED]** The password of the user who has
        permission to add the gateway to the Active Directory domain.
    :rtype: dict
    :returns:
    """
    pass
def list_file_shares(self, GatewayARN: str = None, Limit: int = None, Marker: str = None) -> Dict:
    """
    List the file shares of one file gateway, or of every file gateway that
    belongs to the calling user account when no gateway is given. Supported
    only for file gateways.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListFileShares>`_

    The response (ListFileShareOutput) dict contains:
      * ``Marker`` -- echoes the request's ``Marker`` when one was sent.
      * ``NextMarker`` -- present when more file shares remain; pass it as
        ``Marker`` in a subsequent request to retrieve the next set.
      * ``FileShareInfoList`` -- a list of dicts describing file shares,
        each with ``FileShareType`` (``'NFS'`` or ``'SMB'``),
        ``FileShareARN``, ``FileShareId``, ``FileShareStatus``
        (``CREATING``, ``UPDATING``, ``AVAILABLE`` or ``DELETING``) and
        ``GatewayARN``.

    :type GatewayARN: string
    :param GatewayARN: The Amazon Resource Name (ARN) of the gateway whose
        file shares you want to list. If this field is not present, all file
        shares under your account are listed.
    :type Limit: integer
    :param Limit: Optional. The maximum number of file shares to return in
        the response; must be an integer greater than zero.
    :type Marker: string
    :param Marker: Optional. Opaque pagination token returned from a
        previous ListFileShares operation; continues the list from after
        that previous call.
    :rtype: dict
    :returns:
    """
    pass
def list_gateways(self, Marker: str = None, Limit: int = None) -> Dict:
    """
    List the gateways owned by an AWS account in the region of the request,
    ordered by gateway Amazon Resource Name (ARN). At most 100 gateways are
    returned by default; pagination lets you reduce that count and, when the
    returned list is truncated, walk through the remaining pages via the
    returned marker.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListGateways>`_

    The response dict contains:
      * ``Gateways`` -- a list of GatewayInfo dicts, each holding
        ``GatewayId`` (assigned during activation, part of the gateway ARN),
        ``GatewayARN``, ``GatewayType``, ``GatewayOperationalState``
        (``DISABLED`` or ``ACTIVE``), ``GatewayName``, ``Ec2InstanceId``
        (EC2 instance used to launch the gateway) and ``Ec2InstanceRegion``.
      * ``Marker`` -- present only when more gateways remain to list; use
        it in your next request to fetch the next set.

    :type Marker: string
    :param Marker: An opaque string that indicates the position at which to
        begin the returned list of gateways.
    :type Limit: integer
    :param Limit: Specifies that the list of gateways returned be limited
        to the specified number of items.
    :rtype: dict
    :returns:
    """
    pass
def list_local_disks(self, GatewayARN: str) -> Dict:
    """
    Return a list of a gateway's local disks, the gateway being identified
    by the ARN supplied in the request body. Every disk is reported,
    whether configured as working storage, cache storage, or stored volume,
    or not configured at all. Each entry carries a ``DiskStatus`` of
    present (the disk is available to use), missing (the disk is no longer
    connected to the gateway), or mismatch (the disk node is occupied by a
    disk that has incorrect metadata or corrupted content).

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListLocalDisks>`_

    The response dict contains ``GatewayARN`` plus ``Disks``, a list of
    dicts each describing one local disk with ``DiskId`` (unique device ID
    or other distinguishing data), ``DiskPath`` (path in the gateway VM),
    ``DiskNode`` (device node assigned by the virtualization environment),
    ``DiskStatus``, ``DiskSizeInBytes``, ``DiskAllocationType``
    (``UPLOAD_BUFFER`` or ``CACHE_STORAGE``), ``DiskAllocationResource``
    (the iSCSI qualified name in the form
    *targetIqn::LUNNumber::region-volumeId*; omitted when the disk is not
    defined as an iSCSI target) and ``DiskAttributeList``.

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** The Amazon Resource Name (ARN) of the
        gateway. Use the ListGateways operation to return a list of gateways
        for your account and region.
    :rtype: dict
    :returns:
    """
    pass
def list_tags_for_resource(self, ResourceARN: str, Marker: str = None, Limit: int = None) -> Dict:
    """
    Lists the tags that have been added to the specified resource. This operation is only supported in the cached volume, stored volume and tape gateway type.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListTagsForResource>`_
    **Request Syntax**
    ::
    response = client.list_tags_for_resource(
    ResourceARN='string',
    Marker='string',
    Limit=123
    )
    **Response Syntax**
    ::
    {
    'ResourceARN': 'string',
    'Marker': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    }
    **Response Structure**
    - *(dict) --*
    ListTagsForResourceOutput
    - **ResourceARN** *(string) --*
    The Amazon Resource Name (ARN) of the resource for which you want to list tags.
    - **Marker** *(string) --*
    An opaque string that indicates the position at which to stop returning the list of tags.
    - **Tags** *(list) --*
    An array that contains the tags for the specified resource.
    - *(dict) --*
    A key-value pair that helps you manage, filter, and search for your resource. Allowed characters: letters, white space, and numbers, representable in UTF-8, and the following characters: + - = . _ : /
    - **Key** *(string) --*
    Tag key (String). The key can't start with aws:.
    - **Value** *(string) --*
    Value of the tag key.
    :type ResourceARN: string
    :param ResourceARN: **[REQUIRED]**
    The Amazon Resource Name (ARN) of the resource for which you want to list tags.
    :type Marker: string
    :param Marker:
    An opaque string that indicates the position at which to begin returning the list of tags.
    :type Limit: integer
    :param Limit:
    Specifies that the list of tags returned be limited to the specified number of items.
    :rtype: dict
    :returns:
    """
    pass
def list_tapes(self, TapeARNs: List = None, Marker: str = None, Limit: int = None) -> Dict:
    """
    Lists virtual tapes in your virtual tape library (VTL) and your virtual tape shelf (VTS). You specify the tapes to list by specifying one or more tape Amazon Resource Names (ARNs). If you don't specify a tape ARN, the operation lists all virtual tapes in both your VTL and VTS.
    This operation supports pagination. By default, the operation returns a maximum of up to 100 tapes. You can optionally specify the ``Limit`` parameter in the body to limit the number of tapes in the response. If the number of tapes returned in the response is truncated, the response includes a ``Marker`` element that you can use in your subsequent request to retrieve the next set of tapes. This operation is only supported in the tape gateway type.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListTapes>`_
    **Request Syntax**
    ::
    response = client.list_tapes(
    TapeARNs=[
    'string',
    ],
    Marker='string',
    Limit=123
    )
    **Response Syntax**
    ::
    {
    'TapeInfos': [
    {
    'TapeARN': 'string',
    'TapeBarcode': 'string',
    'TapeSizeInBytes': 123,
    'TapeStatus': 'string',
    'GatewayARN': 'string',
    'PoolId': 'string'
    },
    ],
    'Marker': 'string'
    }
    **Response Structure**
    - *(dict) --*
    A JSON object containing the following fields:
    * ListTapesOutput$Marker
    * ListTapesOutput$TapeInfos
    - **TapeInfos** *(list) --*
    An array of TapeInfo objects, where each object describes a single tape. If there are no tapes in the tape library or VTS, then the ``TapeInfos`` is an empty array.
    - *(dict) --*
    Describes a virtual tape.
    - **TapeARN** *(string) --*
    The Amazon Resource Name (ARN) of a virtual tape.
    - **TapeBarcode** *(string) --*
    The barcode that identifies a specific virtual tape.
    - **TapeSizeInBytes** *(integer) --*
    The size, in bytes, of a virtual tape.
    - **TapeStatus** *(string) --*
    The status of the tape.
    - **GatewayARN** *(string) --*
    The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
    - **PoolId** *(string) --*
    The ID of the pool that you want to add your tape to for archiving. The tape in this pool is archived in the S3 storage class that is associated with the pool. When you use your backup application to eject the tape, the tape is archived directly into the storage class (Glacier or Deep Archive) that corresponds to the pool.
    Valid values: "GLACIER", "DEEP_ARCHIVE"
    - **Marker** *(string) --*
    A string that indicates the position at which to begin returning the next list of tapes. Use the marker in your next request to continue pagination of tapes. If there are no more tapes to list, this element does not appear in the response body.
    :type TapeARNs: list
    :param TapeARNs:
    The Amazon Resource Name (ARN) of each of the tapes you want to list. If you don\'t specify a tape ARN, the response lists all tapes in both your VTL and VTS.
    - *(string) --*
    :type Marker: string
    :param Marker:
    A string that indicates the position at which to begin the returned list of tapes.
    :type Limit: integer
    :param Limit:
    An optional number limit for the tapes in the list returned by this call.
    :rtype: dict
    :returns:
    """
    pass
def list_volume_initiators(self, VolumeARN: str) -> Dict:
    """
    List the iSCSI initiators that are connected to a volume, which lets you
    determine whether the volume is currently in use. Supported only for the
    cached volume and stored volume gateway types.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListVolumeInitiators>`_

    The response (ListVolumeInitiatorsOutput) dict holds one key,
    ``Initiators`` -- the host names and port numbers of all iSCSI
    initiators connected to the gateway.

    :type VolumeARN: string
    :param VolumeARN: **[REQUIRED]** The Amazon Resource Name (ARN) of the
        volume. Use the ListVolumes operation to return a list of gateway
        volumes for the gateway.
    :rtype: dict
    :returns:
    """
    pass
def list_volume_recovery_points(self, GatewayARN: str) -> Dict:
    """
    List the volume recovery points for a gateway. Supported only for the
    cached volume gateway type. Each cache volume has one recovery point: a
    point in time at which all data of the volume is consistent and from
    which you can create a snapshot or clone a new cached volume from a
    source volume. To create a snapshot from a recovery point, use the
    CreateSnapshotFromVolumeRecoveryPoint operation.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListVolumeRecoveryPoints>`_

    The response dict contains ``GatewayARN`` plus
    ``VolumeRecoveryPointInfos``, a list of dicts each with ``VolumeARN``
    (the volume target), ``VolumeSizeInBytes``, ``VolumeUsageInBytes``
    (size of the data stored on the volume; not available for volumes
    created prior to May 13, 2015, until data is stored on them) and
    ``VolumeRecoveryPointTime`` (when the recovery point was taken).

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** The Amazon Resource Name (ARN) of the
        gateway. Use the ListGateways operation to return a list of gateways
        for your account and region.
    :rtype: dict
    :returns:
    """
    pass
def list_volumes(self, GatewayARN: str = None, Marker: str = None, Limit: int = None) -> Dict:
    """
    Lists the iSCSI stored volumes of a gateway. Results are sorted by volume ARN. The response includes only the volume ARNs. If you want additional volume information, use the DescribeStorediSCSIVolumes or the DescribeCachediSCSIVolumes API.
    The operation supports pagination. By default, the operation returns a maximum of up to 100 volumes. You can optionally specify the ``Limit`` field in the body to limit the number of volumes in the response. If the number of volumes returned in the response is truncated, the response includes a Marker field. You can use this Marker value in your subsequent request to retrieve the next set of volumes. This operation is only supported in the cached volume and stored volume gateway types.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListVolumes>`_
    **Request Syntax**
    ::
    response = client.list_volumes(
    GatewayARN='string',
    Marker='string',
    Limit=123
    )
    **Response Syntax**
    ::
    {
    'GatewayARN': 'string',
    'Marker': 'string',
    'VolumeInfos': [
    {
    'VolumeARN': 'string',
    'VolumeId': 'string',
    'GatewayARN': 'string',
    'GatewayId': 'string',
    'VolumeType': 'string',
    'VolumeSizeInBytes': 123,
    'VolumeAttachmentStatus': 'string'
    },
    ]
    }
    **Response Structure**
    - *(dict) --*
    A JSON object containing the following fields:
    * ListVolumesOutput$Marker
    * ListVolumesOutput$VolumeInfos
    - **GatewayARN** *(string) --*
    The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
    - **Marker** *(string) --*
    Use the marker in your next request to continue pagination of iSCSI volumes. If there are no more volumes to list, this field does not appear in the response body.
    - **VolumeInfos** *(list) --*
    An array of VolumeInfo objects, where each object describes an iSCSI volume. If no volumes are defined for the gateway, then ``VolumeInfos`` is an empty array "[]".
    - *(dict) --*
    Describes a storage volume object.
    - **VolumeARN** *(string) --*
    The Amazon Resource Name (ARN) for the storage volume. For example, the following is a valid ARN:
    ``arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB``
    Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).
    - **VolumeId** *(string) --*
    The unique identifier assigned to the volume. This ID becomes part of the volume Amazon Resource Name (ARN), which you use as input for other operations.
    Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).
    - **GatewayARN** *(string) --*
    The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
    - **GatewayId** *(string) --*
    The unique identifier assigned to your gateway during activation. This ID becomes part of the gateway Amazon Resource Name (ARN), which you use as input for other operations.
    Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens (-).
    - **VolumeType** *(string) --*
    One of the VolumeType enumeration values describing the type of the volume.
    - **VolumeSizeInBytes** *(integer) --*
    The size of the volume in bytes.
    - **VolumeAttachmentStatus** *(string) --*
    One of the VolumeStatus values that indicates the state of the storage volume.
    :type GatewayARN: string
    :param GatewayARN:
    The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
    :type Marker: string
    :param Marker:
    A string that indicates the position at which to begin the returned list of volumes. Obtain the marker from the response of a previous List iSCSI Volumes request.
    :type Limit: integer
    :param Limit:
    Specifies that the list of volumes returned be limited to the specified number of items.
    :rtype: dict
    :returns:
    """
    pass
def notify_when_uploaded(self, FileShareARN: str) -> Dict:
    """
    Request a notification through CloudWatch Events once all files written
    to your NFS file share have been uploaded to Amazon S3. The files
    covered are those written to the NFS file share up to the time you make
    the request. When the upload is done, Storage Gateway sends the
    notification through an Amazon CloudWatch Event, which you can route to
    event targets such as Amazon SNS or an AWS Lambda function. Supported
    only for file gateways.

    For more information, see Getting File Upload Notification in the
    Storage Gateway User Guide
    (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-upload-notification).

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/NotifyWhenUploaded>`_

    The response dict contains ``FileShareARN`` and ``NotificationId`` --
    the randomly generated, UUID-format ID of the notification that was
    sent.

    :type FileShareARN: string
    :param FileShareARN: **[REQUIRED]** The Amazon Resource Name (ARN) of
        the file share.
    :rtype: dict
    :returns:
    """
    pass
def refresh_cache(self, FileShareARN: str, FolderList: List = None, Recursive: bool = None) -> Dict:
    """
    Refresh the cache for the specified file share: finds objects in the
    Amazon S3 bucket that were added, removed or replaced since the gateway
    last listed the bucket's contents and cached the results. Supported only
    for the file gateway type. You can subscribe to be notified through an
    Amazon CloudWatch event when your RefreshCache operation completes; see
    `Getting Notified About File Operations <https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification>`__ .

    This call only *initiates* the refresh -- a success code does not mean
    the refresh has finished. Use the refresh-complete notification to
    determine that the operation has completed before checking the share
    for new files.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/RefreshCache>`_

    The response (RefreshCacheOutput) dict contains ``FileShareARN`` and
    ``NotificationId`` -- the randomly generated, UUID-format ID of the
    notification that was sent.

    :type FileShareARN: string
    :param FileShareARN: **[REQUIRED]** The Amazon Resource Name (ARN) of
        the file share you want to refresh.
    :type FolderList: list
    :param FolderList: A comma-separated list of the paths of folders to
        refresh in the cache. The default is [``\"/\"`` ], which refreshes
        objects and folders at the root of the Amazon S3 bucket. If
        ``Recursive`` is set to \"true\", the entire S3 bucket that the file
        share has access to is refreshed.
        - *(string) --*
    :type Recursive: boolean
    :param Recursive: Whether to recursively refresh folders in the cache,
        including folders that were in the cache the last time the gateway
        listed the folder\'s contents. If set to \"true\" (the default),
        each folder listed in ``FolderList`` is recursively updated;
        otherwise subfolders listed in ``FolderList`` are not refreshed and
        only objects in folders listed directly under ``FolderList`` are
        found and used for the update.
    :rtype: dict
    :returns:
    """
    pass
def remove_tags_from_resource(self, ResourceARN: str, TagKeys: List) -> Dict:
    """
    Remove one or more tags from the specified resource. Supported only in
    the cached volume, stored volume and tape gateway types.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/RemoveTagsFromResource>`_

    The response (RemoveTagsFromResourceOutput) dict holds one key,
    ``ResourceARN`` -- the ARN of the resource that the tags were removed
    from.

    :type ResourceARN: string
    :param ResourceARN: **[REQUIRED]** The Amazon Resource Name (ARN) of
        the resource you want to remove the tags from.
    :type TagKeys: list
    :param TagKeys: **[REQUIRED]** The keys of the tags you want to remove
        from the specified resource. A tag is composed of a key/value pair.
        - *(string) --*
    :rtype: dict
    :returns:
    """
    pass
def reset_cache(self, GatewayARN: str) -> Dict:
    """Reset all cache disks that have encountered an error.

    Makes the disks available for reconfiguration as cache storage.
    When a cache is reset the gateway loses its cache storage, so any
    data not yet uploaded to Amazon S3 can be lost; at least one new
    cache disk must then be configured for the gateway to function.
    Supported only for the cached volume and tape gateway types.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ResetCache>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** The Amazon Resource Name (ARN)
        of the gateway. Use the ListGateways operation to return a
        list of gateways for your account and region.
    :rtype: dict
    :returns: ``{'GatewayARN': 'string'}``
    """
    pass
def retrieve_tape_archive(self, TapeARN: str, GatewayARN: str) -> Dict:
    """Retrieve an archived virtual tape from the virtual tape shelf (VTS).

    After retrieval the tape is associated with the target gateway,
    even though it remains listed in the VTS (archive). A tape can be
    retrieved to only one gateway at a time and must be archived again
    before it can be retrieved to another gateway. Supported only for
    the tape gateway type.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/RetrieveTapeArchive>`_

    :type TapeARN: string
    :param TapeARN: **[REQUIRED]** The ARN of the virtual tape to
        retrieve from the virtual tape shelf (VTS).
    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** The ARN of the gateway to
        retrieve the virtual tape to; it must be a tape gateway. Use
        the ListGateways operation to list gateways for your account
        and region.
    :rtype: dict
    :returns: ``{'TapeARN': 'string'}`` — the ARN of the retrieved
        virtual tape.
    """
    pass
def retrieve_tape_recovery_point(self, TapeARN: str, GatewayARN: str) -> Dict:
    """Retrieve the recovery point for the specified virtual tape.

    A recovery point is a point-in-time view of a virtual tape at
    which all the data on the tape is consistent. If a gateway
    crashes, virtual tapes that have recovery points can be recovered
    to a new gateway. The retrieved tape is read-only, can be
    retrieved to only one (tape) gateway, and retrieval is free of
    charge. Supported only for the tape gateway type.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/RetrieveTapeRecoveryPoint>`_

    :type TapeARN: string
    :param TapeARN: **[REQUIRED]** The ARN of the virtual tape whose
        recovery point you want to retrieve.
    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** The ARN of the gateway. Use the
        ListGateways operation to list gateways for your account and
        region.
    :rtype: dict
    :returns: ``{'TapeARN': 'string'}`` — the ARN of the virtual tape
        for which the recovery point was retrieved.
    """
    pass
def set_local_console_password(self, GatewayARN: str, LocalConsolePassword: str) -> Dict:
    """Set the password for the VM local console.

    On first login to the local console you log in to the VM with the
    default credentials; setting a new password is recommended and
    does not require knowing the default password.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/SetLocalConsolePassword>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** The Amazon Resource Name (ARN)
        of the gateway. Use the ListGateways operation to list
        gateways for your account and region.
    :type LocalConsolePassword: string
    :param LocalConsolePassword: **[REQUIRED]** The password to set
        for the VM local console.
    :rtype: dict
    :returns: ``{'GatewayARN': 'string'}``
    """
    pass
def set_smb_guest_password(self, GatewayARN: str, Password: str) -> Dict:
    """Set the password for the ``smbguest`` user.

    The ``smbguest`` user is the user when the authentication method
    for the file share is set to ``GuestAccess``.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/SetSMBGuestPassword>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** The ARN of the file gateway the
        SMB file share is associated with.
    :type Password: string
    :param Password: **[REQUIRED]** The password to set for your SMB
        server.
    :rtype: dict
    :returns: ``{'GatewayARN': 'string'}``
    """
    pass
def shutdown_gateway(self, GatewayARN: str) -> Dict:
    """Shut down a gateway.

    Shuts down the gateway service component running in the gateway's
    virtual machine (VM), not the host VM itself. After shutdown, the
    only callable APIs are StartGateway, DescribeGatewayInformation,
    and ListGateways; applications cannot read from or write to the
    gateway's storage volumes and no snapshots are taken. The request
    returns ``200 OK`` immediately, but the gateway may take some time
    to shut down — poll DescribeGatewayInformation for status. If the
    gateway will not be used again, delete it (DeleteGateway) to stop
    software charges.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ShutdownGateway>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** The Amazon Resource Name (ARN)
        of the gateway. Use the ListGateways operation to list
        gateways for your account and region.
    :rtype: dict
    :returns: ``{'GatewayARN': 'string'}`` — the gateway that was shut
        down.
    """
    pass
def start_gateway(self, GatewayARN: str) -> Dict:
    """Start a gateway that was previously shut down (see ShutdownGateway).

    After the gateway starts, other API calls can be made,
    applications can read from or write to the gateway's storage
    volumes, and snapshot backups can be taken. The request returns
    ``200 OK`` immediately, but the gateway may take some time to be
    ready — call DescribeGatewayInformation and check the status
    before making additional API calls.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/StartGateway>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** The Amazon Resource Name (ARN)
        of the gateway. Use the ListGateways operation to list
        gateways for your account and region.
    :rtype: dict
    :returns: ``{'GatewayARN': 'string'}`` — the gateway that was
        restarted.
    """
    pass
def update_bandwidth_rate_limit(self, GatewayARN: str, AverageUploadRateLimitInBitsPerSec: int = None, AverageDownloadRateLimitInBitsPerSec: int = None) -> Dict:
    """Update the bandwidth rate limits of a gateway.

    Both the upload and download limits can be updated, or just one of
    the two; an unset limit leaves the existing rate limit unchanged.
    By default a gateway has no bandwidth rate limits and may use the
    maximum available bandwidth.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateBandwidthRateLimit>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** The Amazon Resource Name (ARN)
        of the gateway. Use the ListGateways operation to list
        gateways for your account and region.
    :type AverageUploadRateLimitInBitsPerSec: integer
    :param AverageUploadRateLimitInBitsPerSec: The average upload
        bandwidth rate limit in bits per second.
    :type AverageDownloadRateLimitInBitsPerSec: integer
    :param AverageDownloadRateLimitInBitsPerSec: The average download
        bandwidth rate limit in bits per second.
    :rtype: dict
    :returns: ``{'GatewayARN': 'string'}`` — the gateway whose
        throttle information was updated.
    """
    pass
def update_chap_credentials(self, TargetARN: str, SecretToAuthenticateInitiator: str, InitiatorName: str, SecretToAuthenticateTarget: str = None) -> Dict:
    """Update CHAP credentials for a specified iSCSI target.

    By default a gateway does not have Challenge-Handshake
    Authentication Protocol (CHAP) enabled; it can be used for added
    security. Updating CHAP credentials closes all existing
    connections on the target, and initiators must reconnect with the
    new credentials.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateChapCredentials>`_

    :type TargetARN: string
    :param TargetARN: **[REQUIRED]** The ARN of the iSCSI volume
        target. Use the DescribeStorediSCSIVolumes operation to return
        the TargetARN for a specified VolumeARN.
    :type SecretToAuthenticateInitiator: string
    :param SecretToAuthenticateInitiator: **[REQUIRED]** The secret
        key that the initiator (for example, the Windows client) must
        provide to participate in mutual CHAP with the target. Must be
        between 12 and 16 bytes when encoded in UTF-8.
    :type InitiatorName: string
    :param InitiatorName: **[REQUIRED]** The iSCSI initiator that
        connects to the target.
    :type SecretToAuthenticateTarget: string
    :param SecretToAuthenticateTarget: The secret key that the target
        must provide to participate in mutual CHAP with the initiator
        (e.g. Windows client). Must be between 12 and 16 bytes when
        encoded in UTF-8.
    :rtype: dict
    :returns: ``{'TargetARN': 'string', 'InitiatorName': 'string'}`` —
        the same target and initiator name specified in the request.
    """
    pass
def update_gateway_information(self, GatewayARN: str, GatewayName: str = None, GatewayTimezone: str = None) -> Dict:
    """Update a gateway's metadata: its name and time zone.

    For gateways activated after September 2, 2015, the gateway's ARN
    contains the gateway ID rather than the gateway name, so changing
    the name has no effect on the gateway's ARN.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateGatewayInformation>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** The Amazon Resource Name (ARN)
        of the gateway. Use the ListGateways operation to list
        gateways for your account and region.
    :type GatewayName: string
    :param GatewayName: The name you configured for your gateway.
    :type GatewayTimezone: string
    :param GatewayTimezone: A value that indicates the time zone of
        the gateway.
    :rtype: dict
    :returns: ``{'GatewayARN': 'string', 'GatewayName': 'string'}`` —
        the ARN and name of the gateway that was updated.
    """
    pass
def update_gateway_software_now(self, GatewayARN: str) -> Dict:
    """Update the gateway virtual machine (VM) software immediately.

    The request returns ``200 OK`` immediately, but the update may
    take some time to complete; call DescribeGatewayInformation to
    verify the gateway is in the ``STATE_RUNNING`` state. A software
    update forces a system restart of the gateway — minimize
    disruption by increasing your iSCSI initiators' timeouts (see
    `Customizing Your Windows iSCSI Settings
    <https://docs.aws.amazon.com/storagegateway/latest/userguide/ConfiguringiSCSIClientInitiatorWindowsClient.html#CustomizeWindowsiSCSISettings>`__
    and `Customizing Your Linux iSCSI Settings
    <https://docs.aws.amazon.com/storagegateway/latest/userguide/ConfiguringiSCSIClientInitiatorRedHatClient.html#CustomizeLinuxiSCSISettings>`__).

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateGatewaySoftwareNow>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** The Amazon Resource Name (ARN)
        of the gateway. Use the ListGateways operation to list
        gateways for your account and region.
    :rtype: dict
    :returns: ``{'GatewayARN': 'string'}`` — the gateway that was
        updated.
    """
    pass
def update_maintenance_start_time(self, GatewayARN: str, HourOfDay: int, MinuteOfHour: int, DayOfWeek: int = None, DayOfMonth: int = None) -> Dict:
    """Update a gateway's weekly maintenance start time.

    Includes the day and time of the week; the maintenance time is in
    the gateway's time zone.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateMaintenanceStartTime>`_

    :type GatewayARN: string
    :param GatewayARN: **[REQUIRED]** The Amazon Resource Name (ARN)
        of the gateway. Use the ListGateways operation to list
        gateways for your account and region.
    :type HourOfDay: integer
    :param HourOfDay: **[REQUIRED]** The hour component of the
        maintenance start time (00 to 23), in the time zone of the
        gateway.
    :type MinuteOfHour: integer
    :param MinuteOfHour: **[REQUIRED]** The minute component of the
        maintenance start time (00 to 59), in the time zone of the
        gateway.
    :type DayOfWeek: integer
    :param DayOfWeek: The day-of-week component as an ordinal number
        from 0 to 6, where 0 represents Sunday and 6 Saturday.
    :type DayOfMonth: integer
    :param DayOfMonth: The day-of-month component as an ordinal number
        from 1 to 28, where 1 is the first day of the month and 28 the
        last. Only available for tape and volume gateways.
    :rtype: dict
    :returns: ``{'GatewayARN': 'string'}`` — the gateway whose
        maintenance start time was updated.
    """
    pass
def update_nfs_file_share(self, FileShareARN: str, KMSEncrypted: bool = None, KMSKey: str = None, NFSFileShareDefaults: Dict = None, DefaultStorageClass: str = None, ObjectACL: str = None, ClientList: List = None, Squash: str = None, ReadOnly: bool = None, GuessMIMETypeEnabled: bool = None, RequesterPays: bool = None) -> Dict:
    """Update a Network File System (NFS) file share.

    Supported only for the file gateway type. To leave a file share
    field unchanged, set the corresponding input field to None (null).
    Updates the following settings: default storage class for your S3
    bucket, metadata defaults for your S3 bucket, allowed NFS clients
    for your file share, squash settings, and the write status of your
    file share.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateNFSFileShare>`_

    :type FileShareARN: string
    :param FileShareARN: **[REQUIRED]** The ARN of the file share to
        be updated.
    :type KMSEncrypted: boolean
    :param KMSEncrypted: True to use Amazon S3 server-side encryption
        with your own AWS KMS key; false to use a key managed by
        Amazon S3. Optional.
    :type KMSKey: string
    :param KMSKey: The ARN of the AWS KMS key used for Amazon S3
        server-side encryption. Can only be set when KMSEncrypted is
        true. Optional.
    :type NFSFileShareDefaults: dict
    :param NFSFileShareDefaults: Default values for the file share.
        Optional. Keys:

        - ``FileMode`` *(string)* — Unix file mode in the form "nnnn"
          (e.g. "0666"); the default value is 0666.
        - ``DirectoryMode`` *(string)* — Unix directory mode in the
          form "nnnn"; the default value is 0777.
        - ``GroupId`` *(integer)* — default group ID for the file
          share (unless files specify another); default is nfsnobody.
        - ``OwnerId`` *(integer)* — default owner ID for files in the
          file share (unless files specify another); default is
          nfsnobody.
    :type DefaultStorageClass: string
    :param DefaultStorageClass: Default storage class for objects put
        into the S3 bucket by the file gateway: ``S3_STANDARD``,
        ``S3_STANDARD_IA``, or ``S3_ONEZONE_IA``. Defaults to
        ``S3_STANDARD`` when not populated. Optional.
    :type ObjectACL: string
    :param ObjectACL: Access-control-list permission for objects the
        file gateway puts into the S3 bucket. Default is "private".
    :type ClientList: list
    :param ClientList: Clients allowed to access the file gateway;
        must contain valid IP addresses or valid CIDR blocks.
    :type Squash: string
    :param Squash: The user mapped to the anonymous user. Valid
        options: ``RootSquash`` (only root is mapped), ``NoSquash``
        (no one is mapped), ``AllSquash`` (everyone is mapped).
    :type ReadOnly: boolean
    :param ReadOnly: True if the write status of the file share is
        read-only; otherwise false.
    :type GuessMIMETypeEnabled: boolean
    :param GuessMIMETypeEnabled: True to enable guessing of the MIME
        type for uploaded objects based on file extensions; otherwise
        false. Default is true.
    :type RequesterPays: boolean
    :param RequesterPays: True if the requester pays the cost of the
        request and of data download from the S3 bucket; otherwise the
        bucket owner pays. The bucket owner always pays the cost of
        storing data. This is a configuration on the backing S3
        bucket, so keep the file share setting consistent with the
        bucket configuration.
    :rtype: dict
    :returns: ``{'FileShareARN': 'string'}`` — the ARN of the updated
        file share.
    """
    pass
def update_smb_file_share(self, FileShareARN: str, KMSEncrypted: bool = None, KMSKey: str = None, DefaultStorageClass: str = None, ObjectACL: str = None, ReadOnly: bool = None, GuessMIMETypeEnabled: bool = None, RequesterPays: bool = None, SMBACLEnabled: bool = None, ValidUserList: List = None, InvalidUserList: List = None) -> Dict:
    """Update a Server Message Block (SMB) file share.

    Supported only for file gateways. To leave a file share field
    unchanged, set the corresponding input field to None (null). File
    gateways require AWS Security Token Service (AWS STS) to be
    activated in the AWS Region you are creating your file gateway in
    (see `Activating and Deactivating AWS STS in an AWS Region
    <https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html>`__).
    File gateways do not support creating hard or symbolic links on a
    file share.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateSMBFileShare>`_

    :type FileShareARN: string
    :param FileShareARN: **[REQUIRED]** The ARN of the SMB file share
        that you want to update.
    :type KMSEncrypted: boolean
    :param KMSEncrypted: True to use Amazon S3 server-side encryption
        with your own AWS KMS key; false to use a key managed by
        Amazon S3. Optional.
    :type KMSKey: string
    :param KMSKey: The ARN of the AWS KMS key used for Amazon S3
        server-side encryption. Can only be set when KMSEncrypted is
        true. Optional.
    :type DefaultStorageClass: string
    :param DefaultStorageClass: Default storage class for objects put
        into the S3 bucket by the file gateway: ``S3_STANDARD``,
        ``S3_STANDARD_IA``, or ``S3_ONEZONE_IA``. Defaults to
        ``S3_STANDARD`` when not populated. Optional.
    :type ObjectACL: string
    :param ObjectACL: Access-control-list permission for objects the
        file gateway puts into the S3 bucket. Default is "private".
    :type ReadOnly: boolean
    :param ReadOnly: True if the write status of the file share is
        read-only; otherwise false.
    :type GuessMIMETypeEnabled: boolean
    :param GuessMIMETypeEnabled: True to enable guessing of the MIME
        type for uploaded objects based on file extensions; otherwise
        false. Default is true.
    :type RequesterPays: boolean
    :param RequesterPays: True if the requester pays the cost of the
        request and of data download from the S3 bucket; otherwise the
        bucket owner pays. The bucket owner always pays the cost of
        storing data. This is a configuration on the backing S3
        bucket, so keep the file share setting consistent with the
        bucket configuration.
    :type SMBACLEnabled: boolean
    :param SMBACLEnabled: Set to "true" to enable ACL (access control
        list) on the SMB file share; set to "false" to map file and
        directory permissions to the POSIX permissions.
    :type ValidUserList: list
    :param ValidUserList: Users or groups in the Active Directory that
        are allowed to access the file share. A group must be prefixed
        with the @ character, e.g. ``@group1``. Can only be set if
        Authentication is set to ``ActiveDirectory``.
    :type InvalidUserList: list
    :param InvalidUserList: Users or groups in the Active Directory
        that are not allowed to access the file share. A group must be
        prefixed with the @ character, e.g. ``@group1``. Can only be
        set if Authentication is set to ``ActiveDirectory``.
    :rtype: dict
    :returns: ``{'FileShareARN': 'string'}`` — the ARN of the updated
        SMB file share.
    """
    pass
def update_snapshot_schedule(self, VolumeARN: str, StartAt: int, RecurrenceInHours: int, Description: str = None) -> Dict:
    """Update the snapshot schedule configured for a gateway volume.

    Supported only for the cached volume and stored volume gateway
    types. The default schedule is once every 24 hours, starting at
    the volume's creation time. The request identifies the volume and
    gives the schedule information: when snapshots begin on a day and
    their frequency in hours.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateSnapshotSchedule>`_

    :type VolumeARN: string
    :param VolumeARN: **[REQUIRED]** The ARN of the volume. Use the
        ListVolumes operation to return a list of gateway volumes.
    :type StartAt: integer
    :param StartAt: **[REQUIRED]** The hour of the day at which the
        snapshot schedule begins (0 to 23), in the time zone of the
        gateway.
    :type RecurrenceInHours: integer
    :param RecurrenceInHours: **[REQUIRED]** Frequency of snapshots;
        the number of hours between snapshots.
    :type Description: string
    :param Description: Optional description of the snapshot that
        overwrites the existing description.
    :rtype: dict
    :returns: ``{'VolumeARN': 'string'}`` — the ARN of the updated
        storage volume.
    """
    pass
def update_vtl_device_type(self, VTLDeviceARN: str, DeviceType: str) -> Dict:
    """
    Updates the type of medium changer in a tape gateway.

    When a tape gateway is activated you choose a medium changer type; this
    operation lets you switch to a different type afterwards. Supported only
    for the tape gateway type.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateVTLDeviceType>`_

    :type VTLDeviceARN: string
    :param VTLDeviceARN: **[REQUIRED]**
        The Amazon Resource Name (ARN) of the medium changer you want to
        select.
    :type DeviceType: string
    :param DeviceType: **[REQUIRED]**
        The type of medium changer you want to select.
        Valid Values: \"STK-L700\", \"AWS-Gateway-VTL\"
    :rtype: dict
    :returns: ``{'VTLDeviceARN': 'string'}`` — the ARN of the selected
        medium changer.
    """
    # Stub: generated client placeholder, no local behavior.
    pass
| 57.495285
| 659
| 0.603601
| 26,587
| 231,706
| 5.241283
| 0.049987
| 0.014604
| 0.02648
| 0.030893
| 0.745791
| 0.698723
| 0.66897
| 0.651173
| 0.626767
| 0.609609
| 0
| 0.009944
| 0.316837
| 231,706
| 4,029
| 660
| 57.509556
| 0.870386
| 0.824972
| 0
| 0.474684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.474684
| false
| 0.493671
| 0.044304
| 0
| 0.525316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
556990c18f0eed34546a738b0376809b1c2c8d3f
| 40
|
py
|
Python
|
hardware-testing/hardware_testing/gravimetric_test/protocol_implementations/__init__.py
|
Opentrons/protocol_framework
|
ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f
|
[
"Apache-2.0"
] | null | null | null |
hardware-testing/hardware_testing/gravimetric_test/protocol_implementations/__init__.py
|
Opentrons/protocol_framework
|
ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f
|
[
"Apache-2.0"
] | null | null | null |
hardware-testing/hardware_testing/gravimetric_test/protocol_implementations/__init__.py
|
Opentrons/protocol_framework
|
ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f
|
[
"Apache-2.0"
] | null | null | null |
"""Protocol implementations package."""
| 20
| 39
| 0.75
| 3
| 40
| 10
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 40
| 1
| 40
| 40
| 0.810811
| 0.825
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
557a5d1402f2704605a92422848c9fd2f338c19f
| 46
|
py
|
Python
|
prla/assignments/a0/decrypt.py
|
AegirAexx/python-sandbox
|
fa1f584f615c6ed04f80b9dd92d2b241248c9ebe
|
[
"Unlicense"
] | null | null | null |
prla/assignments/a0/decrypt.py
|
AegirAexx/python-sandbox
|
fa1f584f615c6ed04f80b9dd92d2b241248c9ebe
|
[
"Unlicense"
] | null | null | null |
prla/assignments/a0/decrypt.py
|
AegirAexx/python-sandbox
|
fa1f584f615c6ed04f80b9dd92d2b241248c9ebe
|
[
"Unlicense"
] | null | null | null |
def decrypt(message):
    """Return every third character of *message*, starting at index 0."""
    return "".join(ch for pos, ch in enumerate(message) if pos % 3 == 0)
| 15.333333
| 23
| 0.673913
| 6
| 46
| 5.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 0.173913
| 46
| 2
| 24
| 23
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
559db2f694a319a275f2dbe52066ac7bc1d82fd1
| 35
|
py
|
Python
|
competitive/AtCoder/nikkei2019-2-qual/A.py
|
pn11/benkyokai
|
9ebdc46b529e76b7196add26dbc1e62ad48e72b0
|
[
"MIT"
] | null | null | null |
competitive/AtCoder/nikkei2019-2-qual/A.py
|
pn11/benkyokai
|
9ebdc46b529e76b7196add26dbc1e62ad48e72b0
|
[
"MIT"
] | 22
|
2020-03-24T16:24:47.000Z
|
2022-02-26T15:51:18.000Z
|
competitive/AtCoder/nikkei2019-2-qual/A.py
|
pn11/benkyokai
|
9ebdc46b529e76b7196add26dbc1e62ad48e72b0
|
[
"MIT"
] | null | null | null |
# Read N from stdin and print (N + 1) // 2 - 1, which is
# algebraically identical to (N - 1) // 2 for all integers.
N = int(input())
print((N - 1) // 2)
| 11.666667
| 17
| 0.514286
| 8
| 35
| 2.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 0.114286
| 35
| 2
| 18
| 17.5
| 0.483871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
e95df9ad130f1a4b9c9567ea241ed5a57a292b6f
| 799
|
py
|
Python
|
simons_mask_binarizer/_nbdev.py
|
SRSteinkamp/simons_mask_binarizer
|
f4e74868487a4f3311b6e1da70535729e364272d
|
[
"Apache-2.0"
] | null | null | null |
simons_mask_binarizer/_nbdev.py
|
SRSteinkamp/simons_mask_binarizer
|
f4e74868487a4f3311b6e1da70535729e364272d
|
[
"Apache-2.0"
] | null | null | null |
simons_mask_binarizer/_nbdev.py
|
SRSteinkamp/simons_mask_binarizer
|
f4e74868487a4f3311b6e1da70535729e364272d
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
# Maps each exported symbol to the notebook it was defined in.
index = {"binarize_mask": "00_mask_transform.ipynb",
"data_loader": "00_mask_transform.ipynb",
"data_saver": "00_mask_transform.ipynb",
"outpath_from_inpath": "00_mask_transform.ipynb",
"check_outpath": "00_mask_transform.ipynb",
"create_outpath": "00_mask_transform.ipynb",
"savingpath": "00_mask_transform.ipynb",
"mask_transform": "00_mask_transform.ipynb",
"nifti_binarizer": "00_mask_transform.ipynb"}
# Python modules generated from the notebooks above.
modules = ["mask_transform.py"]
doc_url = "https://SRSteinkamp.github.io/simons_mask_binarizer/"
git_url = "https://github.com/SRSteinkamp/simons_mask_binarizer/tree/main/"
# nbdev hook: no custom documentation links are defined for this project.
def custom_doc_links(name): return None
| 36.318182
| 75
| 0.703379
| 100
| 799
| 5.19
| 0.43
| 0.27553
| 0.260116
| 0.346821
| 0.196532
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026667
| 0.155194
| 799
| 21
| 76
| 38.047619
| 0.742222
| 0.045056
| 0
| 0
| 1
| 0
| 0.647832
| 0.272011
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0
| 0.071429
| 0.071429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e971a18f3fce810673635256959c0e6d015f9917
| 49
|
py
|
Python
|
test/lmp/model/_lstm_2002/__init__.py
|
ProFatXuanAll/char-RNN
|
531f101b3d1ba20bafd28ca060aafe6f583d1efb
|
[
"Beerware"
] | null | null | null |
test/lmp/model/_lstm_2002/__init__.py
|
ProFatXuanAll/char-RNN
|
531f101b3d1ba20bafd28ca060aafe6f583d1efb
|
[
"Beerware"
] | null | null | null |
test/lmp/model/_lstm_2002/__init__.py
|
ProFatXuanAll/char-RNN
|
531f101b3d1ba20bafd28ca060aafe6f583d1efb
|
[
"Beerware"
] | null | null | null |
"""Test :py:mod:`lmp.model._lstm_2002` entry."""
| 24.5
| 48
| 0.653061
| 8
| 49
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 0.061224
| 49
| 1
| 49
| 49
| 0.565217
| 0.857143
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e99a6c18daac6cfdfce659dfb540ca8d651c0a7a
| 43
|
py
|
Python
|
peco/parser/inline_if_then.py
|
Tikubonn/peco
|
c77fc163ad31d3c271d299747914ce4ef3386987
|
[
"MIT"
] | null | null | null |
peco/parser/inline_if_then.py
|
Tikubonn/peco
|
c77fc163ad31d3c271d299747914ce4ef3386987
|
[
"MIT"
] | null | null | null |
peco/parser/inline_if_then.py
|
Tikubonn/peco
|
c77fc163ad31d3c271d299747914ce4ef3386987
|
[
"MIT"
] | null | null | null |
class InlineIfThen(Exception):
    """Exception type for the inline if-then parser.

    NOTE(review): given the module name, presumably raised/caught as a
    parser control signal — confirm against callers.
    """
| 8.6
| 31
| 0.697674
| 4
| 43
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.232558
| 43
| 4
| 32
| 10.75
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
7580e839773ddb1a67fc19fdbceae1bf47500583
| 4,780
|
py
|
Python
|
qiskit/ignis/characterization/coherence/circuits.py
|
ajavadia/qiskit-ignis
|
c03d1bd22f49f461e7f3112cb8b854e92f6d961f
|
[
"Apache-2.0"
] | null | null | null |
qiskit/ignis/characterization/coherence/circuits.py
|
ajavadia/qiskit-ignis
|
c03d1bd22f49f461e7f3112cb8b854e92f6d961f
|
[
"Apache-2.0"
] | null | null | null |
qiskit/ignis/characterization/coherence/circuits.py
|
ajavadia/qiskit-ignis
|
c03d1bd22f49f461e7f3112cb8b854e92f6d961f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""
Circuit generation for coherence experiments
"""
import numpy as np
import qiskit
from .coherence_utils import pad_id_gates
def t1_circuits(num_of_gates, gate_time, num_of_qubits, qubit):
    """
    Generates circuit for T1 measurement.

    Each circuit consists of an X gate, followed by a sequence of identity
    gates.

    Args:
        num_of_gates (list of integers): the number of identity gates in each
            circuit. Must be in an increasing order.
        gate_time (float): time in micro-seconds of running a single gate.
        num_of_qubits (integer): the number of qubits in the circuit.
        qubit (integer): index of the qubit whose T1 is to be measured.

    Returns:
        A list of QuantumCircuit
        xdata: a list of delay times in seconds
    """
    # Element-wise scaling — assumes num_of_gates is a numpy array (a plain
    # Python list would be repeated, not scaled) — TODO confirm at callers.
    xdata = gate_time * num_of_gates
    qr = qiskit.QuantumRegister(num_of_qubits)
    cr = qiskit.ClassicalRegister(num_of_qubits)
    circuits = []
    for circ_index, circ_length in enumerate(num_of_gates):
        circ = qiskit.QuantumCircuit(qr, cr)
        circ.name = 'circuit_' + str(circ_index)
        # Excite the qubit, then let it decay through circ_length identity gates.
        circ.x(qr[qubit])
        circ = pad_id_gates(circ, qr, circ_length)
        circ.barrier(qr[qubit])
        circ.measure(qr[qubit], cr[qubit])
        circuits.append(circ)
    return circuits, xdata
def t2star_circuits(num_of_gates, gate_time, num_of_qubits, qubit, nosc=0):
    """
    Generates circuit for T2* measurement.

    Each circuit consists of a Hadamard gate, followed by a sequence of
    identity gates, a phase gate (with a linear phase), and an additional
    Hadamard gate.

    Args:
        num_of_gates (list of integers): the number of identity gates in each
            circuit. Must be in an increasing order.
        gate_time (float): time in micro-seconds of running a single gate.
        num_of_qubits (integer): the number of qubits in the circuit.
        qubit (integer): index of the qubit whose T1 is to be measured.
        nosc: number of oscillations to induce using the phase gate

    Returns:
        A list of QuantumCircuit
        xdata: the delay times
        osc_freq: the induced oscillation frequency
    """
    # Element-wise scaling — assumes num_of_gates is a numpy array — TODO confirm.
    xdata = gate_time * num_of_gates
    qr = qiskit.QuantumRegister(num_of_qubits)
    cr = qiskit.ClassicalRegister(num_of_qubits)
    # nosc oscillations spread across the longest delay; requires a
    # non-empty num_of_gates (xdata[-1] would raise otherwise).
    osc_freq = nosc/xdata[-1]
    circuits = []
    for circ_index, circ_length in enumerate(num_of_gates):
        circ = qiskit.QuantumCircuit(qr, cr)
        circ.name = 'circuit_' + str(circ_index)
        circ.h(qr[qubit])
        circ = pad_id_gates(circ, qr, circ_length)
        circ.barrier(qr[qubit])
        # Phase grows linearly with the delay time of this circuit.
        circ.u1(2*np.pi*osc_freq*xdata[circ_index], qr[qubit])
        circ.h(qr[qubit])
        circ.measure(qr[qubit], cr[qubit])
        circuits.append(circ)
    return circuits, xdata, osc_freq
def t2_circuits(num_of_gates, gate_time, num_of_qubits, qubit):
    """
    Generates circuit for T2 (echo) measurement.

    Each circuit consists of a Y90 gate, followed by a sequence of identity
    gates, an Y gate, a sequence of identity gates and an additional Y90 gate.

    Args:
        num_of_gates (list of integers): the number of identity gates in each
            circuit. Must be in an increasing order. This is the number of
            gates between the H and echo (i.e. total length is twice)
        gate_time (float): time in micro-seconds of running a single gate.
        num_of_qubits (integer): the number of qubits in the circuit.
        qubit (integer): index of the qubit whose T1 is to be measured.

    Returns:
        A list of QuantumCircuit
        xdata: the delay times (TOTAL delay time)
    """
    # Factor 2: identity padding appears both before and after the echo.
    xdata = gate_time * num_of_gates * 2.0
    qr = qiskit.QuantumRegister(num_of_qubits)
    cr = qiskit.ClassicalRegister(num_of_qubits)
    circuits = []
    for circ_index, circ_length in enumerate(num_of_gates):
        circ = qiskit.QuantumCircuit(qr, cr)
        circ.name = 'circuit_' + str(circ_index)
        # u2(0, 0) implements the Y90 pulse described above — TODO confirm
        # gate convention for this qiskit version.
        circ.u2(0.0, 0.0, qr[qubit])
        circ = pad_id_gates(circ, qr, circ_length)
        circ.barrier(qr[qubit])
        # Echo pulse between the two identity-padding sequences.
        circ.y(qr[qubit])
        circ = pad_id_gates(circ, qr, circ_length)
        circ.barrier(qr[qubit])
        circ.u2(0.0, 0.0, qr[qubit])
        circ.measure(qr[qubit], cr[qubit])
        circuits.append(circ)
    return circuits, xdata
| 34.388489
| 78
| 0.632008
| 663
| 4,780
| 4.417798
| 0.190045
| 0.04097
| 0.04097
| 0.02663
| 0.785251
| 0.764083
| 0.733698
| 0.733698
| 0.694776
| 0.670536
| 0
| 0.010604
| 0.289749
| 4,780
| 138
| 79
| 34.637681
| 0.852135
| 0.502092
| 0
| 0.769231
| 0
| 0
| 0.011116
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057692
| false
| 0
| 0.057692
| 0
| 0.173077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
758f89c34f62a95b896e859361805235952a4d83
| 117
|
py
|
Python
|
pynumoptimizer/__init__.py
|
ltadeu6/Optimization_python
|
3f7bb389426618fa7987114df9d8e90c160ca3f0
|
[
"MIT"
] | null | null | null |
pynumoptimizer/__init__.py
|
ltadeu6/Optimization_python
|
3f7bb389426618fa7987114df9d8e90c160ca3f0
|
[
"MIT"
] | null | null | null |
pynumoptimizer/__init__.py
|
ltadeu6/Optimization_python
|
3f7bb389426618fa7987114df9d8e90c160ca3f0
|
[
"MIT"
] | null | null | null |
from .utils import otimization
from .genetic import Genetic
from .nelder_mead import NelderMead
from .pso import PSO
| 23.4
| 35
| 0.82906
| 17
| 117
| 5.647059
| 0.529412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136752
| 117
| 4
| 36
| 29.25
| 0.950495
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
759a0aa8dc6dc91d376a03da2e4a3badce4518fb
| 6,309
|
py
|
Python
|
views/api_views.py
|
marios8543/Uwuchan
|
bd42723bc007f67e3fe74d86f214fc76e504ae8c
|
[
"MIT"
] | null | null | null |
views/api_views.py
|
marios8543/Uwuchan
|
bd42723bc007f67e3fe74d86f214fc76e504ae8c
|
[
"MIT"
] | null | null | null |
views/api_views.py
|
marios8543/Uwuchan
|
bd42723bc007f67e3fe74d86f214fc76e504ae8c
|
[
"MIT"
] | null | null | null |
from app import app,db,cfg,name
import models
from views import utils
import flask
import os
request = flask.request
@app.route('/')
def index():
    """Root endpoint: register a first-time visitor, greet a returning one."""
    user = utils.session_check(request)
    if not user.fng:
        return utils.response(200,'Welcome Back!',user)
    resp = utils.response(200,'Registered')
    # NOTE(review): Flask's set_cookie signature is (key, value, max_age, ...);
    # passing `user` as the third positional arg looks suspicious — confirm
    # the intended signature of the object utils.response returns.
    resp.set_cookie(cfg['cookie_name'],user.id,user)
    return resp
@app.route('/api/make_post',methods=['POST','GET'])
def make_post():
    """Create a new top-level post on a board.

    Query args: ``board``. Form data: ``comment`` (required), optional
    ``subject``; optional ``file`` upload in request.files.
    Returns a utils.response JSON payload.
    """
    board = models.Board(request.args.get('board'))
    user = utils.session_check(request)
    if not user or user.fng:
        return utils.response(403,'You have been banned or have never visited before. If its the latter visit home first.',user)
    if not board.name:
        return utils.response(400,'That board does not exist.',user)
    if not utils.restrict_check(request,board):
        return utils.response(403,'You have been banned from posting in this board.',user)
    # BUG FIX: subject is optional, so read it only when present. Previously
    # it was read unconditionally first, raising KeyError for posts sent
    # without a subject field (the conditional below was dead code).
    if 'subject' in request.form:
        subject = request.form['subject'][:int(cfg['max_subject_length'])]
    else:
        subject = ''
    comment = request.form['comment'][:int(cfg['max_comment_length'])]
    if 'file' in request.files:
        file = request.files['file']
        if not utils.allowed_file(file):
            return utils.response(400,'The image is of an unacceptable format or too large.',user)
        img = models.Image(file=file)
        if not img:
            return utils.response(500,'Something went wrong. Try again later',user)
    else:
        # Dummy image record with id 0 marks "no attachment".
        img = models.Image(dummy=True)
        img.id = 0
    post = models.Post(board=board,author=user,subject=subject,comment=comment,image=img.id)
    if post:
        return utils.response(201,post.dictate(),user)
    return utils.response(500,'Something went wrong. Try again later.',user)
@app.route('/api/get_post')
def get_post():
    """Fetch a single post identified by board name and post id."""
    user = utils.session_check(request)
    if not user or user.fng:
        return utils.response(403,'You have been banned or have never visited before. If its the latter visit home first.',user)
    board = models.Board(request.args.get('board'))
    if not board.name:
        return utils.response(400,'That board does not exist.',user)
    found = models.Post(board=board,id=request.args.get('id'))
    if not found.board:
        return utils.response(404,'That post does not exit',user)
    return utils.response(200,found.dictate(),user)
@app.route('/api/add_comment',methods=['POST','GET'])
def add_comment():
    """Reply to an existing post (creates a post in the same thread).

    Query args: ``board``, ``post``. Form data: ``comment`` (required),
    optional ``subject``; optional ``file`` upload.
    """
    user = utils.session_check(request)
    board = models.Board(request.args.get('board'))
    if 'subject' in request.form:
        subject = request.form['subject'][:int(cfg['max_subject_length'])]
    else:
        subject = ''
    comment = request.form['comment'][:int(cfg['max_comment_length'])]
    if not user or user.fng:
        return utils.response(403,'You have been banned or have never visited before. If its the latter visit home first.',user)
    if not board.name:
        return utils.response(404,'That board does not exist.',user)
    if not utils.restrict_check(request,board):
        return utils.response(403,'You have been banned from posting in this board.',user)
    # FIX: the parent post was previously looked up twice (once before the
    # permission checks, with the result thrown away) — construct it once.
    post = models.Post(id=request.args.get('post'),board=board)
    if not post.board:
        return utils.response(404,'That post does not exist.',user)
    # A parent with thread == 0 is itself the thread root.
    if post.thread==0:
        thread = post.id
    else:
        thread = post.thread
    if 'file' in request.files:
        file = request.files['file']
        if not utils.allowed_file(file):
            return utils.response(400,'The image is of an unacceptable format or too large.',user)
        img = models.Image(file=file)
        if not img:
            return utils.response(500,'Something went wrong. Try again later',user)
    else:
        # Dummy image record with id 0 marks "no attachment".
        img = models.Image(dummy=True)
        img.id = 0
    # Prefix the reply with a ">>id" quote link to the parent post.
    comment = ">>{}\n{}".format(post.id,comment)
    post = models.Post(board=board,author=user,subject=subject,comment=comment,image=img.id,thread=thread)
    if post:
        return utils.response(201,post.dictate(),user)
    return utils.response(500,'Something went wrong. Try again later.',user)
@app.route('/api/get_boards')
def get_boards():
    """List every board as a dict of (name, color, pinned)."""
    user = utils.session_check(request)
    if not user or user.fng:
        return utils.response(403,'You have been banned or have never visited before. If its the latter visit home first.',user)
    rows = db.selectmany(table='boards',fields=['name','color','pinned'])
    board_dicts = [row.__dict__ for row in rows]
    return utils.response(200,board_dicts,user)
@app.route('/api/get_posts')
def get_posts():
    """Return up to ``limit`` posts from a board (capped by config)."""
    user = utils.session_check(request)
    if not user or user.fng:
        return utils.response(403,'You have been banned or have never visited before. If its the latter visit home first.',user)
    board = models.Board(request.args.get('board'))
    if not board.name:
        return utils.response(404,'That board does not exist.',user)
    limit = int(request.args.get('limit'))
    if limit>int(cfg['post_pull_limit']):
        return utils.response(400,'You can only pull {} posts per request'.format(cfg['post_pull_limit']),user)
    fetched = models.get_posts(board,limit,dicc=True)
    return utils.response(200,fetched,user)
@app.route('/api/delete_post')
def delete_post():
    """Delete a post; permitted only for the post's author."""
    user = utils.session_check(request)
    if not user or user.fng:
        return utils.response(403,'You have been banned or have never visited before. If its the latter visit home first.',user)
    board = models.Board(request.args.get('board'))
    if not board.name:
        return utils.response(404,'That board does not exist.',user)
    post = models.Post(id=request.args.get('post'),board=board)
    if not post.board:
        return utils.response(404,'That post does not exist.',user)
    if post.author.id != user.id:
        return utils.response(403,'You are not the author of this post.',user)
    if post.delete():
        return utils.response(200,'Post deleted successfully',user)
    return utils.response(500,'Something went wrong. Try again later.',user)
| 41.506579
| 128
| 0.662863
| 913
| 6,309
| 4.532311
| 0.13253
| 0.103673
| 0.146931
| 0.047849
| 0.768246
| 0.751087
| 0.740213
| 0.723296
| 0.714355
| 0.702997
| 0
| 0.020388
| 0.207006
| 6,309
| 151
| 129
| 41.781457
| 0.806716
| 0
| 0
| 0.647482
| 0
| 0
| 0.253764
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05036
| false
| 0
| 0.035971
| 0
| 0.323741
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
75c937670cc1b6845e7348e88fb3ab86c2d3247c
| 220
|
py
|
Python
|
containment/types/environment.py
|
zancas/protoenv
|
4f7e2c2338e0ca7c107a7b3a9913bb5e6e07245f
|
[
"MIT"
] | null | null | null |
containment/types/environment.py
|
zancas/protoenv
|
4f7e2c2338e0ca7c107a7b3a9913bb5e6e07245f
|
[
"MIT"
] | 3
|
2017-03-11T23:16:15.000Z
|
2017-03-11T23:24:19.000Z
|
containment/types/environment.py
|
zancas/protoenv
|
4f7e2c2338e0ca7c107a7b3a9913bb5e6e07245f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
from typet import Object
from typet import String
class EnvironmentVariable(Object):
    """A named environment variable, read lazily from os.environ."""

    # typet slice annotation: presumably constrains `name` to a string of
    # length >= 1 — TODO confirm against typet's String semantics.
    name: String[1:]

    @property
    def value(self):
        """Return the variable's current value, or None if it is unset."""
        return os.environ.get(self.name)
| 15.714286
| 40
| 0.668182
| 29
| 220
| 5.068966
| 0.689655
| 0.122449
| 0.204082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011628
| 0.218182
| 220
| 13
| 41
| 16.923077
| 0.843023
| 0.095455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.375
| 0.125
| 0.875
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
75db3141352046109beee68bf58451b372ea2c9a
| 44
|
py
|
Python
|
mechanize/_version.py
|
berni69/mechanize
|
6dbb1c56b3664c8b2e2157165d2837e7e8d70eaa
|
[
"BSD-3-Clause"
] | null | null | null |
mechanize/_version.py
|
berni69/mechanize
|
6dbb1c56b3664c8b2e2157165d2837e7e8d70eaa
|
[
"BSD-3-Clause"
] | null | null | null |
mechanize/_version.py
|
berni69/mechanize
|
6dbb1c56b3664c8b2e2157165d2837e7e8d70eaa
|
[
"BSD-3-Clause"
] | null | null | null |
"0.3.7"
# Version tuple mirroring the docstring above — presumably
# (major, minor, patch, pre-release, build), last two unused here.
__version__ = (0, 3, 7, None, None)
| 14.666667
| 35
| 0.568182
| 9
| 44
| 2.333333
| 0.555556
| 0.190476
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0.181818
| 44
| 2
| 36
| 22
| 0.416667
| 0.113636
| 0
| 0
| 0
| 0
| 0.113636
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f95327286721c8748735c28689b182e0709ce52b
| 42
|
py
|
Python
|
set_ings/exceptions.py
|
evocount/python-set-ings
|
365dabaa6def3298f0ab974811f0fa5df1cc1993
|
[
"MIT"
] | null | null | null |
set_ings/exceptions.py
|
evocount/python-set-ings
|
365dabaa6def3298f0ab974811f0fa5df1cc1993
|
[
"MIT"
] | null | null | null |
set_ings/exceptions.py
|
evocount/python-set-ings
|
365dabaa6def3298f0ab974811f0fa5df1cc1993
|
[
"MIT"
] | null | null | null |
class SettingsError(Exception):
    """Base exception for errors raised by the set_ings package."""
| 10.5
| 31
| 0.738095
| 4
| 42
| 7.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 42
| 3
| 32
| 14
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.