Columns (name: dtype):
hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
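The column listing above doubles as a schema for the records that follow: the `*_quality_signal` columns carry the measured values, the matching unsuffixed `qsc_code_*` columns appear to carry 0/1 flags, and `effective` and `hits` close out each row. As a minimal sketch of how rows in this shape could be loaded and screened with pandas (the filename `code_files.parquet` and the filter thresholds are illustrative assumptions, not part of this dump):

```python
# Minimal sketch: load records with the schema above and keep well-starred,
# low-duplication Python files. Filename and thresholds are assumptions.
import pandas as pd

df = pd.read_parquet("code_files.parquet")  # hypothetical file holding these rows

mask = (
    (df["lang"] == "Python")
    & (df["max_stars_count"].fillna(0) >= 5)                          # repo has some stars
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5)   # not mostly duplicated text
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)               # not flagged as auto-generated
)
filtered = df.loc[mask, ["hexsha", "max_stars_repo_name", "max_stars_repo_path", "size"]]
print(filtered.head())
```

Any other column from the listing (for example `qsc_code_frac_chars_comments_quality_signal`) can be swapped into the mask in the same way.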

hexsha: b9678f27461c722fc89e4edb93a357ffded7a5ae
size: 132
ext: py
lang: Python
max_stars_repo_path: tests/missing_data/test_missing_data_air_passengers_DiscardRow_None.py
max_stars_repo_name: shaido987/pyaf
max_stars_repo_head_hexsha: b9afd089557bed6b90b246d3712c481ae26a1957
max_stars_repo_licenses: ["BSD-3-Clause"]
max_stars_count: 377
max_stars_repo_stars_event_min_datetime: 2016-10-13T20:52:44.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-29T18:04:14.000Z
max_issues_repo_path: tests/missing_data/test_missing_data_air_passengers_DiscardRow_None.py
max_issues_repo_name: ysdede/pyaf
max_issues_repo_head_hexsha: b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
max_issues_repo_licenses: ["BSD-3-Clause"]
max_issues_count: 160
max_issues_repo_issues_event_min_datetime: 2016-10-13T16:11:53.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-28T04:21:34.000Z
max_forks_repo_path: tests/missing_data/test_missing_data_air_passengers_DiscardRow_None.py
max_forks_repo_name: ysdede/pyaf
max_forks_repo_head_hexsha: b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
max_forks_repo_licenses: ["BSD-3-Clause"]
max_forks_count: 63
max_forks_repo_forks_event_min_datetime: 2017-03-09T14:51:18.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-27T20:52:57.000Z
content:
import tests.missing_data.test_missing_data_air_passengers_generic as gen
gen.test_air_passengers_missing_data('DiscardRow', None)
| 33
| 73
| 0.886364
| 20
| 132
| 5.35
| 0.6
| 0.308411
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05303
| 132
| 3
| 74
| 44
| 0.856
| 0
| 0
| 0
| 0
| 0
| 0.075758
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|

hexsha: b96b22bb81b0a9151d3781c08783a7e680e8cd3d
size: 1,226
ext: gyp
lang: Python
max_stars_repo_path: client/build/i18n_input_engine.gyp
max_stars_repo_name: zamorajavi/google-input-tools
max_stars_repo_head_hexsha: fc9f11d80d957560f7accf85a5fc27dd23625f70
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: 175
max_stars_repo_stars_event_min_datetime: 2015-01-01T12:40:33.000Z
max_stars_repo_stars_event_max_datetime: 2019-05-24T22:33:59.000Z
max_issues_repo_path: client/build/i18n_input_engine.gyp
max_issues_repo_name: zamorajavi/google-input-tools
max_issues_repo_head_hexsha: fc9f11d80d957560f7accf85a5fc27dd23625f70
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: 11
max_issues_repo_issues_event_min_datetime: 2015-01-19T16:30:56.000Z
max_issues_repo_issues_event_max_datetime: 2018-04-25T01:06:52.000Z
max_forks_repo_path: client/build/i18n_input_engine.gyp
max_forks_repo_name: zamorajavi/google-input-tools
max_forks_repo_head_hexsha: fc9f11d80d957560f7accf85a5fc27dd23625f70
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: 97
max_forks_repo_forks_event_min_datetime: 2015-01-19T15:35:29.000Z
max_forks_repo_forks_event_max_datetime: 2019-05-15T05:48:02.000Z
content:
{
'variables': {
'ENGINE_ROOT': '<(GOOGLE3)/i18n/input/engine',
},
'conditions': [
['OS!="win"', {
'variables': {
'PROTOC': '<!(which protoc)',
},
}],
],
'targets': [
{
'target_name': 'stubs',
'type': '<(library)',
'sources': [
'<(ENGINE_ROOT)/stubs/google3/base/commandlineflags.cc',
'<(ENGINE_ROOT)/stubs/google3/base/commandlineflags_reporting.cc',
'<(ENGINE_ROOT)/stubs/google3/base/init_google.cc',
'<(ENGINE_ROOT)/stubs/google3/base/logging.cc',
'<(ENGINE_ROOT)/stubs/google3/base/mutex.cc',
'<(ENGINE_ROOT)/stubs/google3/base/scoped_ptr_internals.cc',
'<(ENGINE_ROOT)/stubs/google3/base/sysinfo.cc',
'<(ENGINE_ROOT)/stubs/google3/base/vlog_is_on.cc',
],
'conditions': [
['OS=="win"', {
'sources': [
'<(ENGINE_ROOT)/stubs/google3/base/mutex-internal-win.cc',
'<(ENGINE_ROOT)/stubs/posix/sys/mman.cc',
'<(ENGINE_ROOT)/stubs/posix/sys/time.cc',
'<(ENGINE_ROOT)/stubs/posix/unistd.cc',
],
'include_dirs': [
'<(ENGINE_ROOT)/stubs/posix/',
],
}],
],
},
]
}
| 29.190476
| 74
| 0.527732
| 122
| 1,226
| 5.122951
| 0.336066
| 0.224
| 0.312
| 0.272
| 0.6016
| 0.5664
| 0
| 0
| 0
| 0
| 0
| 0.013304
| 0.264274
| 1,226
| 41
| 75
| 29.902439
| 0.679601
| 0
| 0
| 0.317073
| 0
| 0
| 0.62969
| 0.50571
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|

hexsha: b9b7f5b3092a63cc5fca83642417fbe382b41966
size: 1,376
ext: py
lang: Python
max_stars_repo_path: tests/comprehension_test.py
max_stars_repo_name: mxr/unkey
max_stars_repo_head_hexsha: 5245570cc3599a6750273f3f61d77e1fc99e3593
max_stars_repo_licenses: ["MIT"]
max_stars_count: 2
max_stars_repo_stars_event_min_datetime: 2021-08-04T04:50:39.000Z
max_stars_repo_stars_event_max_datetime: 2022-02-01T21:38:34.000Z
max_issues_repo_path: tests/comprehension_test.py
max_issues_repo_name: mxr/unkey
max_issues_repo_head_hexsha: 13188911df145b9ea6369fadd47a510948eb8b5b
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tests/comprehension_test.py
max_forks_repo_name: mxr/unkey
max_forks_repo_head_hexsha: 13188911df145b9ea6369fadd47a510948eb8b5b
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
import pytest
from unkey import _fix
@pytest.mark.parametrize(
("s", "expected"),
(
pytest.param("[x for x in d.keys()]", "[x for x in d]", id="attr list comp"),
pytest.param(
"[x for x in {}.keys()]", "[x for x in {}]", id="literal list comp"
),
pytest.param(
"[x for x in f().keys()]", "[x for x in f()]", id="func list comp"
),
pytest.param("(x for x in d.keys())", "(x for x in d)", id="attr gen exp"),
pytest.param("(x for x in {}.keys())", "(x for x in {})", id="literal gen exp"),
pytest.param("(x for x in f().keys())", "(x for x in f())", id="func gen exp"),
pytest.param("{x for x in d.keys()}", "{x for x in d}", id="attr set comp"),
pytest.param(
"{x for x in {}.keys()}", "{x for x in {}}", id="literal set comp"
),
pytest.param("{x for x in f().keys()}", "{x for x in f()}", id="func set comp"),
pytest.param(
"{x:x for x in d.keys()}", "{x:x for x in d}", id="attr dict comp"
),
pytest.param(
"{x:x for x in {}.keys()}", "{x:x for x in {}}", id="literal dict comp"
),
pytest.param(
"{x:x for x in f().keys()}", "{x:x for x in f()}", id="func dict comp"
),
),
)
def test_comprehensions(s, expected):
assert _fix(s) == expected
| 37.189189
| 88
| 0.473837
| 222
| 1,376
| 2.923423
| 0.135135
| 0.14792
| 0.1849
| 0.25886
| 0.821263
| 0.816641
| 0.784284
| 0.72265
| 0.625578
| 0.542373
| 0
| 0
| 0.306686
| 1,376
| 36
| 89
| 38.222222
| 0.680294
| 0
| 0
| 0.393939
| 0
| 0
| 0.462209
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 1
| 0.030303
| false
| 0
| 0.060606
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|

hexsha: b9bdfa732965931492e679f92a50c4ba7a1297c5
size: 26,194
ext: py
lang: Python
max_stars_repo_path: mayaSDK/PySide2/QtQml.py
max_stars_repo_name: FXTD-ODYSSEY/vscode-mayapy
max_stars_repo_head_hexsha: 7a21872f80b5b740fc653e79c3f9b5268e87b3c3
max_stars_repo_licenses: ["MIT"]
max_stars_count: 20
max_stars_repo_stars_event_min_datetime: 2019-09-20T00:30:22.000Z
max_stars_repo_stars_event_max_datetime: 2021-12-26T06:56:16.000Z
max_issues_repo_path: mayaSDK/PySide2/QtQml.py
max_issues_repo_name: minjiang999/vscode-mayapy
max_issues_repo_head_hexsha: 7a21872f80b5b740fc653e79c3f9b5268e87b3c3
max_issues_repo_licenses: ["MIT"]
max_issues_count: 5
max_issues_repo_issues_event_min_datetime: 2019-12-29T15:19:03.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-29T16:54:19.000Z
max_forks_repo_path: mayaSDK/PySide2/QtQml.py
max_forks_repo_name: minjiang999/vscode-mayapy
max_forks_repo_head_hexsha: 7a21872f80b5b740fc653e79c3f9b5268e87b3c3
max_forks_repo_licenses: ["MIT"]
max_forks_count: 8
max_forks_repo_forks_event_min_datetime: 2019-09-23T05:46:44.000Z
max_forks_repo_forks_event_max_datetime: 2022-01-11T14:42:14.000Z
content:
from PySide2.QtCore import QObject as _QObject
class _Object(object):
__dict__ = None
class VolatileBool(object):
"""
VolatileBool objects contain a C++ volatile bool
"""
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def get(*args, **kwargs):
"""
B.get() -> Bool. Returns the value of the volatile boolean
"""
pass
def set(*args, **kwargs):
"""
B.set(a) -> None. Sets the value of the volatile boolean
"""
pass
__new__ = None
class _Property(object):
def __call__(*args, **kwargs):
"""
x.__call__(...) <==> x(...)
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def getter(*args, **kwargs):
pass
def read(*args, **kwargs):
pass
def setter(*args, **kwargs):
pass
def write(*args, **kwargs):
pass
__new__ = None
class QQmlContext(_QObject):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def baseUrl(*args, **kwargs):
pass
def contextObject(*args, **kwargs):
pass
def contextProperty(*args, **kwargs):
pass
def engine(*args, **kwargs):
pass
def isValid(*args, **kwargs):
pass
def nameForObject(*args, **kwargs):
pass
def parentContext(*args, **kwargs):
pass
def resolvedUrl(*args, **kwargs):
pass
def setBaseUrl(*args, **kwargs):
pass
def setContextObject(*args, **kwargs):
pass
def setContextProperty(*args, **kwargs):
pass
__new__ = None
staticMetaObject = None
class QQmlImageProviderBase(_Object):
def flags(*args, **kwargs):
pass
def imageType(*args, **kwargs):
pass
Flag = None
Flags = None
ForceAsynchronousImageLoading = None
Image = None
ImageResponse = None
ImageType = None
Invalid = None
Pixmap = None
Texture = None
__new__ = None
class QQmlDebuggingEnabler(_Object):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def connectToLocalDebugger(*args, **kwargs):
pass
def startTcpDebugServer(*args, **kwargs):
pass
DoNotWaitForClient = None
StartMode = None
WaitForClient = None
__new__ = None
class QQmlProperty(_Object):
def __copy__(*args, **kwargs):
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __getattribute__(*args, **kwargs):
"""
x.__getattribute__('name') <==> x.name
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def connectNotifySignal(*args, **kwargs):
pass
def hasNotifySignal(*args, **kwargs):
pass
def index(*args, **kwargs):
pass
def isDesignable(*args, **kwargs):
pass
def isProperty(*args, **kwargs):
pass
def isResettable(*args, **kwargs):
pass
def isSignalProperty(*args, **kwargs):
pass
def isValid(*args, **kwargs):
pass
def isWritable(*args, **kwargs):
pass
def method(*args, **kwargs):
pass
def name(*args, **kwargs):
pass
def needsNotifySignal(*args, **kwargs):
pass
def object(*args, **kwargs):
pass
def property(*args, **kwargs):
pass
def propertyType(*args, **kwargs):
pass
def propertyTypeCategory(*args, **kwargs):
pass
def propertyTypeName(*args, **kwargs):
pass
def reset(*args, **kwargs):
pass
def type(*args, **kwargs):
pass
def read(*args, **kwargs):
pass
def write(*args, **kwargs):
pass
Invalid = None
InvalidCategory = None
List = None
Normal = None
Object = None
Property = None
PropertyTypeCategory = None
SignalProperty = None
Type = None
__new__ = None
class QQmlIncubationController(_Object):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def engine(*args, **kwargs):
pass
def incubateFor(*args, **kwargs):
pass
def incubateWhile(*args, **kwargs):
pass
def incubatingObjectCount(*args, **kwargs):
pass
def incubatingObjectCountChanged(*args, **kwargs):
pass
__new__ = None
class QQmlPropertyValueSource(_Object):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def setTarget(*args, **kwargs):
pass
__new__ = None
class QQmlNetworkAccessManagerFactory(_Object):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def create(*args, **kwargs):
pass
__new__ = None
class QQmlExtensionInterface(_Object):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def initializeEngine(*args, **kwargs):
pass
__new__ = None
class QJSValue(_Object):
def __copy__(*args, **kwargs):
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __nonzero__(*args, **kwargs):
"""
x.__nonzero__() <==> x != 0
"""
pass
def call(*args, **kwargs):
pass
def callAsConstructor(*args, **kwargs):
pass
def callWithInstance(*args, **kwargs):
pass
def deleteProperty(*args, **kwargs):
pass
def engine(*args, **kwargs):
pass
def equals(*args, **kwargs):
pass
def hasOwnProperty(*args, **kwargs):
pass
def hasProperty(*args, **kwargs):
pass
def isArray(*args, **kwargs):
pass
def isBool(*args, **kwargs):
pass
def isCallable(*args, **kwargs):
pass
def isDate(*args, **kwargs):
pass
def isError(*args, **kwargs):
pass
def isNull(*args, **kwargs):
pass
def isNumber(*args, **kwargs):
pass
def isObject(*args, **kwargs):
pass
def isQObject(*args, **kwargs):
pass
def isRegExp(*args, **kwargs):
pass
def isString(*args, **kwargs):
pass
def isUndefined(*args, **kwargs):
pass
def isVariant(*args, **kwargs):
pass
def property(*args, **kwargs):
pass
def prototype(*args, **kwargs):
pass
def setProperty(*args, **kwargs):
pass
def setPrototype(*args, **kwargs):
pass
def strictlyEquals(*args, **kwargs):
pass
def toBool(*args, **kwargs):
pass
def toDateTime(*args, **kwargs):
pass
def toInt(*args, **kwargs):
pass
def toNumber(*args, **kwargs):
pass
def toQObject(*args, **kwargs):
pass
def toString(*args, **kwargs):
pass
def toUInt(*args, **kwargs):
pass
def toVariant(*args, **kwargs):
pass
NullValue = None
SpecialValue = None
UndefinedValue = None
__new__ = None
class QJSEngine(_QObject):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def collectGarbage(*args, **kwargs):
pass
def evaluate(*args, **kwargs):
pass
def globalObject(*args, **kwargs):
pass
def installExtensions(*args, **kwargs):
pass
def installTranslatorFunctions(*args, **kwargs):
pass
def newArray(*args, **kwargs):
pass
def newObject(*args, **kwargs):
pass
def newQObject(*args, **kwargs):
pass
def toScriptValue(*args, **kwargs):
pass
AllExtensions = None
ConsoleExtension = None
Extension = None
Extensions = None
GarbageCollectionExtension = None
TranslationExtension = None
__new__ = None
staticMetaObject = None
class QQmlError(_Object):
def __copy__(*args, **kwargs):
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def column(*args, **kwargs):
pass
def description(*args, **kwargs):
pass
def isValid(*args, **kwargs):
pass
def line(*args, **kwargs):
pass
def object(*args, **kwargs):
pass
def setColumn(*args, **kwargs):
pass
def setDescription(*args, **kwargs):
pass
def setLine(*args, **kwargs):
pass
def setObject(*args, **kwargs):
pass
def setUrl(*args, **kwargs):
pass
def toString(*args, **kwargs):
pass
def url(*args, **kwargs):
pass
__new__ = None
class QQmlParserStatus(_Object):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def classBegin(*args, **kwargs):
pass
def componentComplete(*args, **kwargs):
pass
__new__ = None
class QQmlFileSelector(_QObject):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def setExtraSelectors(*args, **kwargs):
pass
def setSelector(*args, **kwargs):
pass
def get(*args, **kwargs):
pass
__new__ = None
staticMetaObject = None
class QQmlAbstractUrlInterceptor(_Object):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def intercept(*args, **kwargs):
pass
DataType = None
JavaScriptFile = None
QmlFile = None
QmldirFile = None
UrlString = None
__new__ = None
class QQmlExpression(_QObject):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def clearError(*args, **kwargs):
pass
def columnNumber(*args, **kwargs):
pass
def context(*args, **kwargs):
pass
def engine(*args, **kwargs):
pass
def error(*args, **kwargs):
pass
def evaluate(*args, **kwargs):
pass
def expression(*args, **kwargs):
pass
def hasError(*args, **kwargs):
pass
def lineNumber(*args, **kwargs):
pass
def notifyOnValueChanged(*args, **kwargs):
pass
def scopeObject(*args, **kwargs):
pass
def setExpression(*args, **kwargs):
pass
def setNotifyOnValueChanged(*args, **kwargs):
pass
def setSourceLocation(*args, **kwargs):
pass
def sourceFile(*args, **kwargs):
pass
__new__ = None
staticMetaObject = None
valueChanged = None
class QQmlComponent(_QObject):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __nonzero__(*args, **kwargs):
"""
x.__nonzero__() <==> x != 0
"""
pass
def beginCreate(*args, **kwargs):
pass
def completeCreate(*args, **kwargs):
pass
def create(*args, **kwargs):
pass
def creationContext(*args, **kwargs):
pass
def errorString(*args, **kwargs):
pass
def errors(*args, **kwargs):
pass
def isError(*args, **kwargs):
pass
def isLoading(*args, **kwargs):
pass
def isNull(*args, **kwargs):
pass
def isReady(*args, **kwargs):
pass
def loadUrl(*args, **kwargs):
pass
def progress(*args, **kwargs):
pass
def setData(*args, **kwargs):
pass
def status(*args, **kwargs):
pass
def url(*args, **kwargs):
pass
Asynchronous = None
CompilationMode = None
Error = None
Loading = None
Null = None
PreferSynchronous = None
Ready = None
Status = None
__new__ = None
progressChanged = None
staticMetaObject = None
statusChanged = None
class QQmlIncubator(_Object):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __nonzero__(*args, **kwargs):
"""
x.__nonzero__() <==> x != 0
"""
pass
def clear(*args, **kwargs):
pass
def errors(*args, **kwargs):
pass
def forceCompletion(*args, **kwargs):
pass
def incubationMode(*args, **kwargs):
pass
def isError(*args, **kwargs):
pass
def isLoading(*args, **kwargs):
pass
def isNull(*args, **kwargs):
pass
def isReady(*args, **kwargs):
pass
def object(*args, **kwargs):
pass
def setInitialState(*args, **kwargs):
pass
def status(*args, **kwargs):
pass
def statusChanged(*args, **kwargs):
pass
Asynchronous = None
AsynchronousIfNested = None
Error = None
IncubationMode = None
Loading = None
Null = None
Ready = None
Status = None
Synchronous = None
__new__ = None
class QQmlTypesExtensionInterface(_Object):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def registerTypes(*args, **kwargs):
pass
__new__ = None
class QQmlPropertyMap(_QObject):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def clear(*args, **kwargs):
pass
def contains(*args, **kwargs):
pass
def count(*args, **kwargs):
pass
def insert(*args, **kwargs):
pass
def isEmpty(*args, **kwargs):
pass
def keys(*args, **kwargs):
pass
def size(*args, **kwargs):
pass
def updateValue(*args, **kwargs):
pass
def value(*args, **kwargs):
pass
__new__ = None
staticMetaObject = None
valueChanged = None
class QQmlScriptString(_Object):
def __copy__(*args, **kwargs):
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def booleanLiteral(*args, **kwargs):
pass
def isEmpty(*args, **kwargs):
pass
def isNullLiteral(*args, **kwargs):
pass
def isUndefinedLiteral(*args, **kwargs):
pass
def numberLiteral(*args, **kwargs):
pass
def stringLiteral(*args, **kwargs):
pass
__new__ = None
class QJSValueIterator(_Object):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def hasNext(*args, **kwargs):
pass
def name(*args, **kwargs):
pass
def next(*args, **kwargs):
pass
def value(*args, **kwargs):
pass
__new__ = None
class QQmlListReference(_Object):
def __copy__(*args, **kwargs):
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def append(*args, **kwargs):
pass
def at(*args, **kwargs):
pass
def canAppend(*args, **kwargs):
pass
def canAt(*args, **kwargs):
pass
def canClear(*args, **kwargs):
pass
def canCount(*args, **kwargs):
pass
def clear(*args, **kwargs):
pass
def count(*args, **kwargs):
pass
def isManipulable(*args, **kwargs):
pass
def isReadable(*args, **kwargs):
pass
def isValid(*args, **kwargs):
pass
def listElementType(*args, **kwargs):
pass
def object(*args, **kwargs):
pass
__new__ = None
class QQmlFile(_Object):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __nonzero__(*args, **kwargs):
"""
x.__nonzero__() <==> x != 0
"""
pass
def clear(*args, **kwargs):
pass
def connectDownloadProgress(*args, **kwargs):
pass
def connectFinished(*args, **kwargs):
pass
def data(*args, **kwargs):
pass
def dataByteArray(*args, **kwargs):
pass
def error(*args, **kwargs):
pass
def isError(*args, **kwargs):
pass
def isLoading(*args, **kwargs):
pass
def isNull(*args, **kwargs):
pass
def isReady(*args, **kwargs):
pass
def load(*args, **kwargs):
pass
def size(*args, **kwargs):
pass
def status(*args, **kwargs):
pass
def url(*args, **kwargs):
pass
def isLocalFile(*args, **kwargs):
pass
def isSynchronous(*args, **kwargs):
pass
def urlToLocalFileOrQrc(*args, **kwargs):
pass
Error = None
Loading = None
Null = None
Ready = None
Status = None
__new__ = None
class ListProperty(_Property):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
class QQmlEngine(QJSEngine):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addImageProvider(*args, **kwargs):
pass
def addImportPath(*args, **kwargs):
pass
def addNamedBundle(*args, **kwargs):
pass
def addPluginPath(*args, **kwargs):
pass
def baseUrl(*args, **kwargs):
pass
def clearComponentCache(*args, **kwargs):
pass
def event(*args, **kwargs):
pass
def imageProvider(*args, **kwargs):
pass
def importPathList(*args, **kwargs):
pass
def importPlugin(*args, **kwargs):
pass
def incubationController(*args, **kwargs):
pass
def networkAccessManager(*args, **kwargs):
pass
def networkAccessManagerFactory(*args, **kwargs):
pass
def offlineStoragePath(*args, **kwargs):
pass
def outputWarningsToStandardError(*args, **kwargs):
pass
def pluginPathList(*args, **kwargs):
pass
def removeImageProvider(*args, **kwargs):
pass
def rootContext(*args, **kwargs):
pass
def setBaseUrl(*args, **kwargs):
pass
def setImportPathList(*args, **kwargs):
pass
def setIncubationController(*args, **kwargs):
pass
def setNetworkAccessManagerFactory(*args, **kwargs):
pass
def setOfflineStoragePath(*args, **kwargs):
pass
def setOutputWarningsToStandardError(*args, **kwargs):
pass
def setPluginPathList(*args, **kwargs):
pass
def setUrlInterceptor(*args, **kwargs):
pass
def trimComponentCache(*args, **kwargs):
pass
def urlInterceptor(*args, **kwargs):
pass
def contextForObject(*args, **kwargs):
pass
def objectOwnership(*args, **kwargs):
pass
def setContextForObject(*args, **kwargs):
pass
def setObjectOwnership(*args, **kwargs):
pass
CppOwnership = None
JavaScriptOwnership = None
ObjectOwnership = None
__new__ = None
quit = None
staticMetaObject = None
warnings = None
class QQmlExtensionPlugin(_QObject, QQmlExtensionInterface):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def baseUrl(*args, **kwargs):
pass
def initializeEngine(*args, **kwargs):
pass
def registerTypes(*args, **kwargs):
pass
__new__ = None
staticMetaObject = None
class QQmlApplicationEngine(QQmlEngine):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def load(*args, **kwargs):
pass
def loadData(*args, **kwargs):
pass
def rootObjects(*args, **kwargs):
pass
__new__ = None
objectCreated = None
staticMetaObject = None
def qmlRegisterType(*args, **kwargs):
pass
QML_HAS_ATTACHED_PROPERTIES = 1
| 15.06268
| 70
| 0.457433
| 2,182
| 26,194
| 5.238772
| 0.146196
| 0.25807
| 0.300061
| 0.322719
| 0.48316
| 0.471875
| 0.444755
| 0.426034
| 0.399703
| 0.28379
| 0
| 0.000399
| 0.425288
| 26,194
| 1,738
| 71
| 15.071346
| 0.758935
| 0.090861
| 0
| 0.657497
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.405777
| false
| 0.405777
| 0.006878
| 0
| 0.598349
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|

hexsha: b9d1f0dd73f31b5e7afe7b66a7f352bc783bd42a
size: 74
ext: py
lang: Python
max_stars_repo_path: app/engine/NoReturnFig/Loop/Break.py
max_stars_repo_name: normidar/py-visibility-coding-alpha
max_stars_repo_head_hexsha: 479d0c928f325178fc383dc1af0014cdf9771d1d
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: app/engine/NoReturnFig/Loop/Break.py
max_issues_repo_name: normidar/py-visibility-coding-alpha
max_issues_repo_head_hexsha: 479d0c928f325178fc383dc1af0014cdf9771d1d
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: app/engine/NoReturnFig/Loop/Break.py
max_forks_repo_name: normidar/py-visibility-coding-alpha
max_forks_repo_head_hexsha: 479d0c928f325178fc383dc1af0014cdf9771d1d
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
from ..NoReturnFig import NoReturnFig
class Break(NoReturnFig):
pass
| 14.8
| 37
| 0.77027
| 8
| 74
| 7.125
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 74
| 4
| 38
| 18.5
| 0.919355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|

hexsha: b9d7d6797a54ea35ab52e099269ba5314a8f950e
size: 12,764
ext: py
lang: Python
max_stars_repo_path: cottonformation/res/servicecatalogappregistry.py
max_stars_repo_name: gitter-badger/cottonformation-project
max_stars_repo_head_hexsha: 354f1dce7ea106e209af2d5d818b6033a27c193c
max_stars_repo_licenses: ["BSD-2-Clause"]
max_stars_count: 5
max_stars_repo_stars_event_min_datetime: 2021-07-22T03:45:59.000Z
max_stars_repo_stars_event_max_datetime: 2021-12-17T21:07:14.000Z
max_issues_repo_path: cottonformation/res/servicecatalogappregistry.py
max_issues_repo_name: gitter-badger/cottonformation-project
max_issues_repo_head_hexsha: 354f1dce7ea106e209af2d5d818b6033a27c193c
max_issues_repo_licenses: ["BSD-2-Clause"]
max_issues_count: 1
max_issues_repo_issues_event_min_datetime: 2021-06-25T18:01:31.000Z
max_issues_repo_issues_event_max_datetime: 2021-06-25T18:01:31.000Z
max_forks_repo_path: cottonformation/res/servicecatalogappregistry.py
max_forks_repo_name: gitter-badger/cottonformation-project
max_forks_repo_head_hexsha: 354f1dce7ea106e209af2d5d818b6033a27c193c
max_forks_repo_licenses: ["BSD-2-Clause"]
max_forks_count: 2
max_forks_repo_forks_event_min_datetime: 2021-06-27T03:08:21.000Z
max_forks_repo_forks_event_max_datetime: 2021-06-28T22:15:51.000Z
content:
# -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
#--- Resource declaration ---
@attr.s
class Application(Resource):
"""
AWS Object Type = "AWS::ServiceCatalogAppRegistry::Application"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-application.html
Property Document:
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-application.html#cfn-servicecatalogappregistry-application-name
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-application.html#cfn-servicecatalogappregistry-application-description
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-application.html#cfn-servicecatalogappregistry-application-tags
"""
AWS_OBJECT_TYPE = "AWS::ServiceCatalogAppRegistry::Application"
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-application.html#cfn-servicecatalogappregistry-application-name"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-application.html#cfn-servicecatalogappregistry-application-description"""
p_Tags: typing.Dict[str, TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-application.html#cfn-servicecatalogappregistry-application-tags"""
@property
def rv_Id(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-application.html#aws-resource-servicecatalogappregistry-application-return-values"""
return GetAtt(resource=self, attr_name="Id")
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-application.html#aws-resource-servicecatalogappregistry-application-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@attr.s
class ResourceAssociation(Resource):
"""
AWS Object Type = "AWS::ServiceCatalogAppRegistry::ResourceAssociation"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-resourceassociation.html
Property Document:
- ``rp_Application``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-resourceassociation.html#cfn-servicecatalogappregistry-resourceassociation-application
- ``rp_Resource``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-resourceassociation.html#cfn-servicecatalogappregistry-resourceassociation-resource
- ``rp_ResourceType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-resourceassociation.html#cfn-servicecatalogappregistry-resourceassociation-resourcetype
"""
AWS_OBJECT_TYPE = "AWS::ServiceCatalogAppRegistry::ResourceAssociation"
rp_Application: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Application"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-resourceassociation.html#cfn-servicecatalogappregistry-resourceassociation-application"""
rp_Resource: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Resource"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-resourceassociation.html#cfn-servicecatalogappregistry-resourceassociation-resource"""
rp_ResourceType: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ResourceType"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-resourceassociation.html#cfn-servicecatalogappregistry-resourceassociation-resourcetype"""
@property
def rv_ApplicationArn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-resourceassociation.html#aws-resource-servicecatalogappregistry-resourceassociation-return-values"""
return GetAtt(resource=self, attr_name="ApplicationArn")
@property
def rv_ResourceArn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-resourceassociation.html#aws-resource-servicecatalogappregistry-resourceassociation-return-values"""
return GetAtt(resource=self, attr_name="ResourceArn")
@property
def rv_Id(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-resourceassociation.html#aws-resource-servicecatalogappregistry-resourceassociation-return-values"""
return GetAtt(resource=self, attr_name="Id")
@attr.s
class AttributeGroup(Resource):
"""
AWS Object Type = "AWS::ServiceCatalogAppRegistry::AttributeGroup"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroup.html
Property Document:
- ``rp_Attributes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroup.html#cfn-servicecatalogappregistry-attributegroup-attributes
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroup.html#cfn-servicecatalogappregistry-attributegroup-name
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroup.html#cfn-servicecatalogappregistry-attributegroup-description
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroup.html#cfn-servicecatalogappregistry-attributegroup-tags
"""
AWS_OBJECT_TYPE = "AWS::ServiceCatalogAppRegistry::AttributeGroup"
rp_Attributes: dict = attr.ib(
default=None,
validator=attr.validators.instance_of(dict),
metadata={AttrMeta.PROPERTY_NAME: "Attributes"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroup.html#cfn-servicecatalogappregistry-attributegroup-attributes"""
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroup.html#cfn-servicecatalogappregistry-attributegroup-name"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroup.html#cfn-servicecatalogappregistry-attributegroup-description"""
p_Tags: typing.Dict[str, TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroup.html#cfn-servicecatalogappregistry-attributegroup-tags"""
@property
def rv_Id(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroup.html#aws-resource-servicecatalogappregistry-attributegroup-return-values"""
return GetAtt(resource=self, attr_name="Id")
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroup.html#aws-resource-servicecatalogappregistry-attributegroup-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@attr.s
class AttributeGroupAssociation(Resource):
"""
AWS Object Type = "AWS::ServiceCatalogAppRegistry::AttributeGroupAssociation"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroupassociation.html
Property Document:
- ``rp_Application``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroupassociation.html#cfn-servicecatalogappregistry-attributegroupassociation-application
- ``rp_AttributeGroup``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroupassociation.html#cfn-servicecatalogappregistry-attributegroupassociation-attributegroup
"""
AWS_OBJECT_TYPE = "AWS::ServiceCatalogAppRegistry::AttributeGroupAssociation"
rp_Application: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Application"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroupassociation.html#cfn-servicecatalogappregistry-attributegroupassociation-application"""
rp_AttributeGroup: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "AttributeGroup"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroupassociation.html#cfn-servicecatalogappregistry-attributegroupassociation-attributegroup"""
@property
def rv_ApplicationArn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroupassociation.html#aws-resource-servicecatalogappregistry-attributegroupassociation-return-values"""
return GetAtt(resource=self, attr_name="ApplicationArn")
@property
def rv_AttributeGroupArn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroupassociation.html#aws-resource-servicecatalogappregistry-attributegroupassociation-return-values"""
return GetAtt(resource=self, attr_name="AttributeGroupArn")
@property
def rv_Id(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicecatalogappregistry-attributegroupassociation.html#aws-resource-servicecatalogappregistry-attributegroupassociation-return-values"""
return GetAtt(resource=self, attr_name="Id")
| 58.820276
| 231
| 0.776637
| 1,253
| 12,764
| 7.818037
| 0.059058
| 0.0539
| 0.176399
| 0.065945
| 0.953859
| 0.953859
| 0.903022
| 0.903022
| 0.903022
| 0.897509
| 0
| 0.000088
| 0.104669
| 12,764
| 216
| 232
| 59.092593
| 0.857105
| 0.427139
| 0
| 0.638889
| 0
| 0
| 0.077099
| 0.040939
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092593
| false
| 0.018519
| 0.037037
| 0
| 0.407407
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|

hexsha: b9e80da292d2c9f03d060b47d11ef742ae0f830d
size: 229
ext: py
lang: Python
max_stars_repo_path: RNAPuzzles/rnapuzzles/views/user/__init__.py
max_stars_repo_name: whinyadventure/RNA-Puzzles
max_stars_repo_head_hexsha: bbd147e1a0748a77b5e3424a93ad57bb430b5a0e
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: RNAPuzzles/rnapuzzles/views/user/__init__.py
max_issues_repo_name: whinyadventure/RNA-Puzzles
max_issues_repo_head_hexsha: bbd147e1a0748a77b5e3424a93ad57bb430b5a0e
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: 26
max_issues_repo_issues_event_min_datetime: 2019-10-08T11:11:25.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-12T00:52:30.000Z
max_forks_repo_path: RNAPuzzles/rnapuzzles/views/user/__init__.py
max_forks_repo_name: whinyadventure/RNA-Puzzles
max_forks_repo_head_hexsha: bbd147e1a0748a77b5e3424a93ad57bb430b5a0e
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2020-05-11T18:51:04.000Z
max_forks_repo_forks_event_max_datetime: 2020-05-11T18:51:04.000Z
content:
from .detail import *
from .signin import *
from .signup import *
from .update import *
from .passwordUpdate import *
from .passwordReset import *
from .newPassword import *
from .resetForm import *
from .unconfirmedList import *
| 25.444444
| 30
| 0.768559
| 27
| 229
| 6.518519
| 0.407407
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152838
| 229
| 9
| 30
| 25.444444
| 0.907216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|

hexsha: 6a0aaa2cd589f7255a174e83c9cf3538a3359f9b
size: 22
ext: py
lang: Python
max_stars_repo_path: cride/circles/views/__init__.py
max_stars_repo_name: AngelFA04/PlatziCride
max_stars_repo_head_hexsha: ab37e6e7ff5b552ed799da22f562084592563680
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: cride/circles/views/__init__.py
max_issues_repo_name: AngelFA04/PlatziCride
max_issues_repo_head_hexsha: ab37e6e7ff5b552ed799da22f562084592563680
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: cride/circles/views/__init__.py
max_forks_repo_name: AngelFA04/PlatziCride
max_forks_repo_head_hexsha: ab37e6e7ff5b552ed799da22f562084592563680
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
from . import circles
| 11
| 21
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|

hexsha: 6a0d0818368942a4bfc0c3475d8a2e32d4b3175c
size: 151
ext: py
lang: Python
max_stars_repo_path: google-cloud-sdk/platform/gsutil/third_party/rsa/tests/constants.py
max_stars_repo_name: bopopescu/searchparty
max_stars_repo_head_hexsha: afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: 8
max_stars_repo_stars_event_min_datetime: 2016-02-08T11:59:31.000Z
max_stars_repo_stars_event_max_datetime: 2020-05-31T15:19:54.000Z
max_issues_repo_path: google-cloud-sdk/platform/gsutil/third_party/rsa/tests/constants.py
max_issues_repo_name: bopopescu/searchparty
max_issues_repo_head_hexsha: afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: 1
max_issues_repo_issues_event_min_datetime: 2021-02-23T22:20:14.000Z
max_issues_repo_issues_event_max_datetime: 2021-02-23T22:20:14.000Z
max_forks_repo_path: google-cloud-sdk/platform/gsutil/third_party/rsa/tests/constants.py
max_forks_repo_name: bopopescu/searchparty
max_forks_repo_head_hexsha: afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: 7
max_forks_repo_forks_event_min_datetime: 2016-02-09T09:28:14.000Z
max_forks_repo_forks_event_max_datetime: 2020-07-25T19:03:36.000Z
content:
# -*- coding: utf-8 -*-
from rsa._compat import have_python3
if have_python3:
from py3kconstants import *
else:
from py2kconstants import *
| 15.1
| 36
| 0.701987
| 19
| 151
| 5.421053
| 0.684211
| 0.213592
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0.205298
| 151
| 9
| 37
| 16.777778
| 0.816667
| 0.139073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|

hexsha: dbea7b5668a6caab9724b591b737a24de13987bc
size: 167
ext: py
lang: Python
max_stars_repo_path: obywatele/admin.py
max_stars_repo_name: testteammaciej/wikikracja
max_stars_repo_head_hexsha: c530a90da5e9fbe7f506f98d7135de2eae3043c8
max_stars_repo_licenses: ["MIT"]
max_stars_count: 7
max_stars_repo_stars_event_min_datetime: 2016-02-21T17:25:54.000Z
max_stars_repo_stars_event_max_datetime: 2021-10-09T19:36:10.000Z
max_issues_repo_path: obywatele/admin.py
max_issues_repo_name: soma115/wikikracja
max_issues_repo_head_hexsha: 7715ca1daa4ca09888e1c7389ed5f8a2df29898b
max_issues_repo_licenses: ["MIT"]
max_issues_count: 19
max_issues_repo_issues_event_min_datetime: 2020-02-11T23:55:01.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-31T18:11:56.000Z
max_forks_repo_path: obywatele/admin.py
max_forks_repo_name: testteammaciej/wikikracja
max_forks_repo_head_hexsha: c530a90da5e9fbe7f506f98d7135de2eae3043c8
max_forks_repo_licenses: ["MIT"]
max_forks_count: 3
max_forks_repo_forks_event_min_datetime: 2016-01-20T22:34:58.000Z
max_forks_repo_forks_event_max_datetime: 2020-09-16T07:45:42.000Z
content:
from django.contrib import admin
# from django.contrib.auth.models import User
# Register your models here.
# admin.site.unregister(User)
# admin.site.register(User)
| 23.857143
| 45
| 0.784431
| 24
| 167
| 5.458333
| 0.541667
| 0.152672
| 0.259542
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113772
| 167
| 6
| 46
| 27.833333
| 0.885135
| 0.742515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|

hexsha: dbfc539a2f80a3db6bd2eca1dd01d62ffdd19006
size: 48
ext: py
lang: Python
max_stars_repo_path: pyqtuidoc/fakemods/yselector.py
max_stars_repo_name: twardoch/pyqtuidoc
max_stars_repo_head_hexsha: 6c03d7f1311c36acd738fb05f2282dd1f7b5d85d
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: pyqtuidoc/fakemods/yselector.py
max_issues_repo_name: twardoch/pyqtuidoc
max_issues_repo_head_hexsha: 6c03d7f1311c36acd738fb05f2282dd1f7b5d85d
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: pyqtuidoc/fakemods/yselector.py
max_forks_repo_name: twardoch/pyqtuidoc
max_forks_repo_head_hexsha: 6c03d7f1311c36acd738fb05f2282dd1f7b5d85d
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
from PyQt5.QtWidgets import QLabel as YSelector
| 24
| 47
| 0.854167
| 7
| 48
| 5.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02381
| 0.125
| 48
| 1
| 48
| 48
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|

hexsha: e000f45c9400f1d0a092a11d2069952008125f5c
size: 24
ext: py
lang: Python
max_stars_repo_path: __init__.py
max_stars_repo_name: smurfix/PushForKiCad
max_stars_repo_head_hexsha: 22b1110992ba5afd35dd92d14b380177d23e5914
max_stars_repo_licenses: ["MIT"]
max_stars_count: 24
max_stars_repo_stars_event_min_datetime: 2022-01-06T11:15:11.000Z
max_stars_repo_stars_event_max_datetime: 2022-02-06T09:51:39.000Z
max_issues_repo_path: __init__.py
max_issues_repo_name: smurfix/PushForKiCad
max_issues_repo_head_hexsha: 22b1110992ba5afd35dd92d14b380177d23e5914
max_issues_repo_licenses: ["MIT"]
max_issues_count: 13
max_issues_repo_issues_event_min_datetime: 2022-01-06T18:10:05.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-06T12:52:40.000Z
max_forks_repo_path: __init__.py
max_forks_repo_name: smurfix/PushForKiCad
max_forks_repo_head_hexsha: 22b1110992ba5afd35dd92d14b380177d23e5914
max_forks_repo_licenses: ["MIT"]
max_forks_count: 2
max_forks_repo_forks_event_min_datetime: 2022-03-05T11:53:04.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-07T00:55:27.000Z
content:
from .src import plugin
| 12
| 23
| 0.791667
| 4
| 24
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|

hexsha: e056c0d1175b95adb492297891efbad51804b931
size: 89
ext: py
lang: Python
max_stars_repo_path: sdwan/sdwan/utils.py
max_stars_repo_name: sambyers/netauto_learning
max_stars_repo_head_hexsha: 22c1049bf86e188f774f1c977823abea2bb3abfe
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: sdwan/sdwan/utils.py
max_issues_repo_name: sambyers/netauto_learning
max_issues_repo_head_hexsha: 22c1049bf86e188f774f1c977823abea2bb3abfe
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: sdwan/sdwan/utils.py
max_forks_repo_name: sambyers/netauto_learning
max_forks_repo_head_hexsha: 22c1049bf86e188f774f1c977823abea2bb3abfe
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
from munch import munchify
def dict_to_obj(dictionary):
return munchify(dictionary)
| 17.8
| 31
| 0.797753
| 12
| 89
| 5.75
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146067
| 89
| 5
| 31
| 17.8
| 0.907895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|

hexsha: e0614371bd11dc505fbf930753981c97ce9b5e42
size: 63,548
ext: py
lang: Python
max_stars_repo_path: pyspedas/mms/__init__.py
max_stars_repo_name: xnchu/pyspedas
max_stars_repo_head_hexsha: 62657581c0b6ed980fcd99ac34455a8b7a77dede
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: pyspedas/mms/__init__.py
max_issues_repo_name: xnchu/pyspedas
max_issues_repo_head_hexsha: 62657581c0b6ed980fcd99ac34455a8b7a77dede
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: pyspedas/mms/__init__.py
max_forks_repo_name: xnchu/pyspedas
max_forks_repo_head_hexsha: 62657581c0b6ed980fcd99ac34455a8b7a77dede
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
"""
This module contains routines for loading MMS data
"""
from .mms_load_data import mms_load_data
from .fgm.mms_curl import mms_curl
from .fgm.mms_fgm_remove_flags import mms_fgm_remove_flags
from .fgm.mms_fgm_set_metadata import mms_fgm_set_metadata
from .scm.mms_scm_set_metadata import mms_scm_set_metadata
from .edp.mms_edp_set_metadata import mms_edp_set_metadata
from .dsp.mms_dsp_set_metadata import mms_dsp_set_metadata
from .edi.mms_edi_set_metadata import mms_edi_set_metadata
from .fpi.mms_fpi_set_metadata import mms_fpi_set_metadata
from .mec.mms_mec_set_metadata import mms_mec_set_metadata
from .hpca.mms_hpca_set_metadata import mms_hpca_set_metadata
from .feeps.mms_feeps_correct_energies import mms_feeps_correct_energies
from .feeps.mms_feeps_flat_field_corrections import mms_feeps_flat_field_corrections
from .feeps.mms_feeps_active_eyes import mms_feeps_active_eyes
from .feeps.mms_feeps_split_integral_ch import mms_feeps_split_integral_ch
from .feeps.mms_feeps_remove_bad_data import mms_feeps_remove_bad_data
from .feeps.mms_feeps_remove_sun import mms_feeps_remove_sun
from .feeps.mms_feeps_omni import mms_feeps_omni
from .feeps.mms_feeps_spin_avg import mms_feeps_spin_avg
from .eis.mms_eis_omni import mms_eis_omni
from .eis.mms_eis_spin_avg import mms_eis_spin_avg
from .eis.mms_eis_set_metadata import mms_eis_set_metadata
from pyspedas.mms.mec_ascii.mms_get_state_data import mms_get_state_data
from .mms_config import CONFIG
from pyspedas import tnames
import re
from pytplot import del_data
from functools import wraps
# the following decorator prints the loaded tplot variables after each load routine call
def print_vars(func):
def wrapper(*args, **kwargs):
variables = func(*args, **kwargs)
if variables is None:
return None
if kwargs.get('available') or CONFIG['download_only']:
print('Available files:')
else:
print('Loaded variables:')
for var in variables:
print(var)
return variables
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
return wrapper
@print_vars
def mms_load_state(trange=['2015-10-16', '2015-10-17'], probe='1', level='def',
datatypes=['pos', 'vel'], no_update=False, pred_or_def=True, suffix=''):
"""
This function loads the state (ephemeris and attitude) data from the ASCII files
into tplot variables
Parameters:
trange : list of str
time range of interest [starttime, endtime] with the format
'YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
probe : str or list of str
list of probes, valid values for MMS probes are ['1','2','3','4'].
level : str
indicates level of data (options: 'def' (definitive), 'pred' (predicted); default: def)
datatypes : str or list of str
no datatype for state data (options: 'pos', 'vel', 'spinras', 'spindec')
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
no_update: bool
Set this flag to preserve the original data. if not set and newer
data is found the existing data will be overwritten
Returns:
List of tplot variables created.
"""
return mms_get_state_data(trange=trange, probe=probe, level=level, datatypes=datatypes,
no_download=no_update, pred_or_def=pred_or_def, suffix=suffix)
@print_vars
def mms_load_fgm(trange=['2015-10-16', '2015-10-17'], probe='1', data_rate='srvy',
level='l2', instrument='fgm', datatype='', varformat=None, varnames=[], suffix='',
keep_flagged=False, get_support_data=True, time_clip=False, no_update=False,
available=False, notplot=False, latest_version=False, major_version=False,
min_version=None, cdf_version=None, spdf=False, always_prompt=False):
"""
This function loads FGM data into tplot variables
Parameters:
trange : list of str
time range of interest [starttime, endtime] with the format
'YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
probe : str or list of str
list of probes, valid values for MMS probes are ['1','2','3','4'].
data_rate : str or list of str
instrument data rates for FGM include 'brst' 'fast' 'slow' 'srvy'. The
default is 'srvy'.
level : str
indicates level of data processing. the default if no level is specified is 'l2'
datatype : str or list of str
no datatype for FGM instrument (all science data are loaded)
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot. By default, only loads in data with a
"VAR_TYPE" attribute of "data".
time_clip: bool
Data will be clipped to the exact trange specified by the trange keyword.
varformat: str
The file variable formats to load into tplot. Wildcard character
"*" is accepted. By default, all variables are loaded in.
varnames: list of str
List of variable names to load (if not specified,
all data variables are loaded)
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
notplot: bool
If True, then data are returned in a hash table instead of
being stored in tplot variables (useful for debugging, and
access to multi-dimensional data products)
available: bool
If True, simply return the available data files (without downloading)
for the requested paramters
no_update: bool
Set this flag to preserve the original data. if not set and newer
data is found the existing data will be overwritten
cdf_version: str
Specify a specific CDF version # to load (e.g., cdf_version='4.3.0')
min_version: str
Specify a minimum CDF version # to load
latest_version: bool
Only grab the latest CDF version in the requested time interval
major_version: bool
Only open the latest major CDF version (e.g., X in vX.Y.Z) in the requested time interval
keep_flagged: bool
If True, don't remove flagged data (flagged data are set to NaNs by
default, this keyword turns this off)
always_prompt: bool
Set this keyword to always prompt for the user's username and password;
useful if you accidently save an incorrect password, or if your SDC password has changed
spdf: bool
If True, download the data from the SPDF instead of the SDC
Returns:
List of tplot variables created.
"""
if (varformat is not None) and (not keep_flagged) and (not available) and (not notplot):
varformat_fetch = varformat+'|*_flag_*'
else:
varformat_fetch = varformat
tvars = mms_load_data(trange=trange, notplot=notplot, probe=probe, data_rate=data_rate, level=level, instrument=instrument,
datatype=datatype, varformat=varformat_fetch, varnames=varnames, suffix=suffix, get_support_data=get_support_data,
time_clip=time_clip, no_update=no_update, available=available, latest_version=latest_version, major_version=major_version,
min_version=min_version, cdf_version=cdf_version, spdf=spdf, always_prompt=always_prompt)
if tvars is None or available or notplot or CONFIG['download_only']:
return tvars
# the probes will need to be strings beyond this point
if isinstance(probe, list):
probe = [str(p) for p in probe]
# remove flagged data
if not keep_flagged:
mms_fgm_remove_flags(probe, data_rate, level, instrument, suffix=suffix)
# Delete the flags variable if it was not originally requested
if varformat is not None:
regex = re.compile(varformat.replace("*", ".*"))
tvars_to_delete = [tvar for tvar in tvars if not re.match(regex, tvar)]
for tvar in tvars_to_delete:
del_data(tvar)
tvars.remove(tvar)
mms_fgm_set_metadata(probe, data_rate, level, instrument, suffix=suffix)
return tvars
@print_vars
def mms_load_hpca(trange=['2015-10-16', '2015-10-17'], probe='1', data_rate='srvy',
level='l2', datatype='moments', get_support_data=None, time_clip=False, no_update=False,
varformat=None, varnames=[], suffix='', center_measurement=False, available=False, notplot=False,
latest_version=False, major_version=False, min_version=None, cdf_version=None, spdf=False,
always_prompt=False):
"""
This function loads HPCA data into tplot variables
Parameters:
trange : list of str
time range of interest [starttime, endtime] with the format
['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
probe : str or list of str
list of probes, valid values for MMS probes are ['1','2','3','4'].
data_rate : str or list of str
instrument data rates for HPCA include 'brst', 'srvy'. The
default is 'srvy'.
level : str
indicates level of data processing. The default, if no level is specified, is 'l2'.
datatype : str or list of str
Valid datatypes for HPCA are 'moments' and 'ion'; the default is 'moments'
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot. By default, only loads in data with a
"VAR_TYPE" attribute of "data".
varformat: str
The file variable formats to load into tplot. Wildcard character
"*" is accepted. By default, all variables are loaded in.
varnames: list of str
List of variable names to load (if not specified,
all data variables are loaded)
time_clip: bool
Data will be clipped to the exact trange specified by the trange keyword.
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
center_measurement: bool
If True, the CDF epoch variables are time-shifted to the middle
of the accumulation interval by their DELTA_PLUS_VAR and
DELTA_MINUS_VAR variable attributes
notplot: bool
If True, then data are returned in a hash table instead of
being stored in tplot variables (useful for debugging, and
access to multi-dimensional data products)
available: bool
If True, simply return the available data files (without downloading)
for the requested parameters
no_update: bool
Set this flag to preserve the original data; if not set and newer
data is found, the existing data will be overwritten.
cdf_version: str
Specify a specific CDF version # to load (e.g., cdf_version='4.3.0')
min_version: str
Specify a minimum CDF version # to load
latest_version: bool
Only grab the latest CDF version in the requested time interval
major_version: bool
Only open the latest major CDF version (e.g., X in vX.Y.Z) in the requested time interval
always_prompt: bool
Set this keyword to always prompt for the user's username and password;
useful if you accidentally save an incorrect password, or if your SDC password has changed
spdf: bool
If True, download the data from the SPDF instead of the SDC
Returns:
List of tplot variables created.
"""
if get_support_data is None:
get_support_data = True
tvars = mms_load_data(trange=trange, notplot=notplot, probe=probe, data_rate=data_rate, level=level, instrument='hpca',
datatype=datatype, varformat=varformat, varnames=varnames, suffix=suffix, get_support_data=get_support_data,
time_clip=time_clip, no_update=no_update, center_measurement=center_measurement, available=available,
latest_version=latest_version, major_version=major_version, min_version=min_version, cdf_version=cdf_version,
spdf=spdf, always_prompt=always_prompt)
if tvars is None or available or notplot or CONFIG['download_only']:
return tvars
mms_hpca_set_metadata(probe=probe, suffix=suffix)
return tvars
@print_vars
def mms_load_fpi(trange=['2015-10-16', '2015-10-17'], probe='1', data_rate='fast',
level='l2', datatype=['des-moms', 'dis-moms'], varformat=None, varnames=[], suffix='',
get_support_data=False, time_clip=False, no_update=False, center_measurement=False,
available=False, notplot=False, latest_version=False, major_version=False,
min_version=None, cdf_version=None, spdf=False, always_prompt=False):
"""
This function loads FPI data into tplot variables
Parameters:
trange : list of str
time range of interest [starttime, endtime] with the format
['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
probe : str or list of str
list of probes, valid values for MMS probes are ['1','2','3','4'].
data_rate : str or list of str
instrument data rates for FPI include 'brst', 'fast'. The
default is 'fast'.
level : str
indicates level of data processing. The default, if no level is specified, is 'l2'.
datatype : str or list of str
Valid datatypes for FPI are:
'des-moms', 'dis-moms' (default)
'des-dist', 'dis-dist'
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot. By default, only loads in data with a
"VAR_TYPE" attribute of "data".
time_clip: bool
Data will be clipped to the exact trange specified by the trange keyword.
varformat: str
The file variable formats to load into tplot. Wildcard character
"*" is accepted. By default, all variables are loaded in.
varnames: list of str
List of variable names to load (if not specified,
all data variables are loaded)
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
center_measurement: bool
If True, the CDF epoch variables are time-shifted to the middle
of the accumulation interval by their DELTA_PLUS_VAR and
DELTA_MINUS_VAR variable attributes
notplot: bool
If True, then data are returned in a hash table instead of
being stored in tplot variables (useful for debugging, and
access to multi-dimensional data products)
available: bool
If True, simply return the available data files (without downloading)
for the requested parameters
no_update: bool
Set this flag to preserve the original data; if not set and newer
data is found, the existing data will be overwritten.
cdf_version: str
Specify a specific CDF version # to load (e.g., cdf_version='4.3.0')
min_version: str
Specify a minimum CDF version # to load
latest_version: bool
Only grab the latest CDF version in the requested time interval
major_version: bool
Only open the latest major CDF version (e.g., X in vX.Y.Z) in the requested time interval
always_prompt: bool
Set this keyword to always prompt for the user's username and password;
useful if you accidentally save an incorrect password, or if your SDC password has changed
spdf: bool
If True, download the data from the SPDF instead of the SDC
Returns:
List of tplot variables created.
"""
tvars = mms_load_data(trange=trange, probe=probe, data_rate=data_rate, level=level, instrument='fpi',
datatype=datatype, varformat=varformat, varnames=varnames, suffix=suffix, get_support_data=get_support_data,
time_clip=time_clip, no_update=no_update, center_measurement=center_measurement, available=available,
notplot=notplot, latest_version=latest_version, major_version=major_version, min_version=min_version,
cdf_version=cdf_version, spdf=spdf, always_prompt=always_prompt)
if tvars is None or available or notplot or CONFIG['download_only']:
return tvars
mms_fpi_set_metadata(probe, data_rate, datatype, level, suffix=suffix)
return tvars
@print_vars
def mms_load_scm(trange=['2015-10-16', '2015-10-17'], probe='1', data_rate='srvy',
level='l2', datatype='', varformat=None, varnames=[], suffix='', get_support_data=False,
time_clip=True, no_update=False, available=False, notplot=False, latest_version=False,
major_version=False, min_version=None, cdf_version=None, spdf=False, always_prompt=False):
"""
This function loads SCM data into tplot variables
Parameters:
trange : list of str
time range of interest [starttime, endtime] with the format
['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
probe : str or list of str
list of probes, valid values for MMS probes are ['1','2','3','4'].
data_rate : str or list of str
instrument data rates for SCM include ['brst', 'fast', 'slow', 'srvy']. The
default is 'srvy'.
level : str
indicates level of data processing. The default, if no level is specified, is 'l2'.
datatype : str or list of str
Valid datatypes for SCM are: ['scsrvy', 'cal', 'scb', 'scf', 'schb', 'scm', 'scs']
If no value is given the default is scsrvy for srvy data, and scb for brst data.
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot. By default, only loads in data with a
"VAR_TYPE" attribute of "data".
time_clip: bool
Data will be clipped to the exact trange specified by the trange keyword.
varformat: str
The file variable formats to load into tplot. Wildcard character
"*" is accepted. By default, all variables are loaded in.
varnames: list of str
List of variable names to load (if not specified,
all data variables are loaded)
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
notplot: bool
If True, then data are returned in a hash table instead of
being stored in tplot variables (useful for debugging, and
access to multi-dimensional data products)
available: bool
If True, simply return the available data files (without downloading)
for the requested parameters
no_update: bool
Set this flag to preserve the original data; if not set and newer
data is found, the existing data will be overwritten.
cdf_version: str
Specify a specific CDF version # to load (e.g., cdf_version='4.3.0')
min_version: str
Specify a minimum CDF version # to load
latest_version: bool
Only grab the latest CDF version in the requested time interval
major_version: bool
Only open the latest major CDF version (e.g., X in vX.Y.Z) in the requested time interval
always_prompt: bool
Set this keyword to always prompt for the user's username and password;
useful if you accidentally save an incorrect password, or if your SDC password has changed
spdf: bool
If True, download the data from the SPDF instead of the SDC
Returns:
List of tplot variables created.
"""
if not isinstance(data_rate, list):
data_rate = list([data_rate])
if isinstance(datatype, str) and datatype == '':
# guess from data_rate
datatype = list()
for dr in data_rate:
if dr == 'srvy':
datatype.append('scsrvy')
if dr == 'brst':
datatype.extend(['scb', 'schb'])
datatype = list(set(datatype)) # make it unique
else:
if not isinstance(datatype, list):
datatype = list([datatype])
# ensure datatype does not contain empty string
datatype = list(set([dt.strip() for dt in datatype]))
if '' in datatype:
datatype.remove('')
tvars = mms_load_data(trange=trange, notplot=notplot, probe=probe, data_rate=data_rate, level=level, instrument='scm',
datatype=datatype, varformat=varformat, varnames=varnames, suffix=suffix, get_support_data=get_support_data,
time_clip=time_clip, no_update=no_update, available=available, latest_version=latest_version,
major_version=major_version, min_version=min_version, cdf_version=cdf_version, spdf=spdf,
always_prompt=always_prompt)
if tvars is None or available or notplot or CONFIG['download_only']:
return tvars
coord = ''
if level == 'l1a':
coord = '123'
elif level == 'l1b':
coord = 'scm123'
elif level == 'l2':
coord = 'gse'
if not isinstance(probe, list):
probe = [probe]
if not isinstance(datatype, list):
datatype = [datatype]
probe = [str(p) for p in probe]
for p in probe:
for dtype in datatype:
mms_scm_set_metadata(tvars, p, dtype, coord, suffix=suffix)
return tvars
@print_vars
def mms_load_mec(trange=['2015-10-16', '2015-10-17'], probe='1', data_rate='srvy',
level='l2', datatype='ephts04d', varformat=None, varnames=[], suffix='', get_support_data=False,
time_clip=False, no_update=False, available=False, notplot=False, latest_version=False,
major_version=False, min_version=None, cdf_version=None, spdf=False, always_prompt=False):
"""
This function loads MEC data into tplot variables
Parameters:
trange : list of str
time range of interest [starttime, endtime] with the format
['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
probe : str or list of str
list of probes, valid values for MMS probes are ['1','2','3','4'].
data_rate : str or list of str
instrument data rates for MEC include ['brst', 'srvy']. The
default is 'srvy'.
level : str
indicates level of data processing. The default, if no level is specified, is 'l2'.
datatype : str or list of str
Valid datatypes for MEC are: ['ephts04d', 'epht89q', 'epht89d']; default is 'ephts04d'
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot. By default, only loads in data with a
"VAR_TYPE" attribute of "data".
time_clip: bool
Data will be clipped to the exact trange specified by the trange keyword.
varformat: str
The file variable formats to load into tplot. Wildcard character
"*" is accepted. By default, all variables are loaded in.
varnames: list of str
List of variable names to load (if not specified,
all data variables are loaded)
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
notplot: bool
If True, then data are returned in a hash table instead of
being stored in tplot variables (useful for debugging, and
access to multi-dimensional data products)
available: bool
If True, simply return the available data files (without downloading)
for the requested parameters
no_update: bool
Set this flag to preserve the original data; if not set and newer
data is found, the existing data will be overwritten.
cdf_version: str
Specify a specific CDF version # to load (e.g., cdf_version='4.3.0')
min_version: str
Specify a minimum CDF version # to load
latest_version: bool
Only grab the latest CDF version in the requested time interval
major_version: bool
Only open the latest major CDF version (e.g., X in vX.Y.Z) in the requested time interval
always_prompt: bool
Set this keyword to always prompt for the user's username and password;
useful if you accidentally save an incorrect password, or if your SDC password has changed
spdf: bool
If True, download the data from the SPDF instead of the SDC
Returns:
List of tplot variables created.
"""
tvars = mms_load_data(trange=trange, probe=probe, data_rate=data_rate, level=level, instrument='mec',
datatype=datatype, get_support_data=get_support_data, varformat=varformat, varnames=varnames, suffix=suffix,
time_clip=time_clip, no_update=no_update, available=available, notplot=notplot,
latest_version=latest_version, major_version=major_version, min_version=min_version,
cdf_version=cdf_version, spdf=spdf, always_prompt=always_prompt)
if tvars is None or available or notplot or CONFIG['download_only']:
return tvars
mms_mec_set_metadata(probe, data_rate, level, suffix=suffix)
return tvars
@print_vars
def mms_load_feeps(trange=['2015-10-16', '2015-10-17'], probe='1', data_rate='srvy',
level='l2', datatype='electron', varformat=None, varnames=[], get_support_data=True, suffix='', time_clip=False,
no_update=False, available=False, notplot=False, no_flatfield_corrections=False, data_units=['count_rate', 'intensity'],
latest_version=False, major_version=False, min_version=None, cdf_version=None, spdf=False, always_prompt=False):
"""
This function loads FEEPS data into tplot variables
Parameters:
trange : list of str
time range of interest [starttime, endtime] with the format
['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
probe : str or list of str
list of probes, valid values for MMS probes are ['1','2','3','4'].
data_rate : str or list of str
instrument data rates for FEEPS include ['brst', 'srvy']. The
default is 'srvy'.
level : str
indicates level of data processing. The default, if no level is specified, is 'l2'.
datatype : str or list of str
Valid datatypes for FEEPS are:
L2, L1b: ['electron', 'ion']
L1a: ['electron-bottom', 'electron-top', 'ion-bottom', 'ion-top']
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot. By default, only loads in data with a
"VAR_TYPE" attribute of "data".
time_clip: bool
Data will be clipped to the exact trange specified by the trange keyword.
varformat: str
The file variable formats to load into tplot. Wildcard character
"*" is accepted. By default, all variables are loaded in.
varnames: list of str
List of variable names to load (if not specified,
all data variables are loaded)
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
notplot: bool
If True, then data are returned in a hash table instead of
being stored in tplot variables (useful for debugging, and
access to multi-dimensional data products)
available: bool
If True, simply return the available data files (without downloading)
for the requested parameters
no_update: bool
Set this flag to preserve the original data; if not set and newer
data is found, the existing data will be overwritten.
cdf_version: str
Specify a specific CDF version # to load (e.g., cdf_version='4.3.0')
min_version: str
Specify a minimum CDF version # to load
latest_version: bool
Only grab the latest CDF version in the requested time interval
major_version: bool
Only open the latest major CDF version (e.g., X in vX.Y.Z) in the requested time interval
always_prompt: bool
Set this keyword to always prompt for the user's username and password;
useful if you accidentally save an incorrect password, or if your SDC password has changed
spdf: bool
If True, download the data from the SPDF instead of the SDC
Returns:
List of tplot variables created.
"""
tvars = mms_load_data(trange=trange, notplot=notplot, probe=probe, data_rate=data_rate, level=level, instrument='feeps',
datatype=datatype, varformat=varformat, varnames=varnames, get_support_data=get_support_data, suffix=suffix,
time_clip=time_clip, no_update=no_update, available=available, latest_version=latest_version,
major_version=major_version, min_version=min_version, cdf_version=cdf_version, spdf=spdf, always_prompt=always_prompt)
if tvars == [] or available or notplot or CONFIG['download_only']:
return tvars
probes = probe if isinstance(probe, list) else [probe]
data_rates = data_rate if isinstance(data_rate, list) else [data_rate]
levels = level if isinstance(level, list) else [level]
datatypes = datatype if isinstance(datatype, list) else [datatype]
data_units = data_units if isinstance(data_units, list) else [data_units]
probes = [str(p) for p in probes]
mms_feeps_correct_energies(probes, data_rate, level=level, suffix=suffix)
if not no_flatfield_corrections:
mms_feeps_flat_field_corrections(probes=probes, data_rate=data_rate, suffix=suffix)
for probe in probes:
for datatype in datatypes:
mms_feeps_remove_bad_data(probe=probe, data_rate=data_rate, datatype=datatype, level=level, suffix=suffix)
for data_unit in data_units:
eyes = mms_feeps_active_eyes(trange, probe, data_rate, datatype, level)
split_vars = mms_feeps_split_integral_ch(data_unit, datatype, probe, suffix=suffix, data_rate=data_rate, level=level, sensor_eyes=eyes)
sun_removed_vars = mms_feeps_remove_sun(eyes, trange, probe=probe, datatype=datatype, data_units=data_unit, data_rate=data_rate, level=level, suffix=suffix)
omni_vars = mms_feeps_omni(eyes, probe=probe, datatype=datatype, data_units=data_unit, data_rate=data_rate, level=level, suffix=suffix)
tvars = tvars + split_vars + sun_removed_vars + omni_vars
tvars.append(mms_feeps_spin_avg(probe=probe, data_units=data_unit, datatype=datatype, data_rate=data_rate, level=level, suffix=suffix))
return tvars
@print_vars
def mms_load_eis(trange=['2015-10-16', '2015-10-17'], probe='1', data_rate='srvy', level='l2', datatype='extof',
varformat=None, varnames=[], get_support_data=True, suffix='', time_clip=False, no_update=False,
available=False, notplot=False, latest_version=False, major_version=False, min_version=None, cdf_version=None,
spdf=False, always_prompt=False):
"""
This function loads EIS data into tplot variables
Parameters:
trange : list of str
time range of interest [starttime, endtime] with the format
['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
probe : str or list of str
list of probes, valid values for MMS probes are ['1','2','3','4'].
data_rate : str or list of str
instrument data rates for EIS include ['brst', 'srvy']. The
default is 'srvy'.
level : str
indicates level of data processing. The default, if no level is specified, is 'l2'.
datatype : str or list of str
Valid datatypes for EIS are: ['extof', 'phxtof', 'electronenergy']; the default is 'extof'
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot. By default, only loads in data with a
"VAR_TYPE" attribute of "data".
time_clip: bool
Data will be clipped to the exact trange specified by the trange keyword.
varformat: str
The file variable formats to load into tplot. Wildcard character
"*" is accepted. By default, all variables are loaded in.
varnames: list of str
List of variable names to load (if not specified,
all data variables are loaded)
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
notplot: bool
If True, then data are returned in a hash table instead of
being stored in tplot variables (useful for debugging, and
access to multi-dimensional data products)
available: bool
If True, simply return the available data files (without downloading)
for the requested parameters
no_update: bool
Set this flag to preserve the original data; if not set and newer
data is found, the existing data will be overwritten.
cdf_version: str
Specify a specific CDF version # to load (e.g., cdf_version='4.3.0')
min_version: str
Specify a minimum CDF version # to load
latest_version: bool
Only grab the latest CDF version in the requested time interval
major_version: bool
Only open the latest major CDF version (e.g., X in vX.Y.Z) in the requested time interval
always_prompt: bool
Set this keyword to always prompt for the user's username and password;
useful if you accidentally save an incorrect password, or if your SDC password has changed
spdf: bool
If True, download the data from the SPDF instead of the SDC
Returns:
List of tplot variables created.
"""
tvars = mms_load_data(trange=trange, notplot=notplot, probe=probe, data_rate=data_rate, level=level, instrument='epd-eis',
datatype=datatype, varformat=varformat, varnames=varnames, get_support_data=get_support_data, prefix='', suffix=suffix,
time_clip=time_clip, no_update=no_update, available=available, latest_version=latest_version,
major_version=major_version, min_version=min_version, cdf_version=cdf_version, spdf=spdf, always_prompt=always_prompt)
if tvars == [] or available or notplot or CONFIG['download_only']:
return tvars
if not isinstance(probe, list): probe = [probe]
if not isinstance(data_rate, list): data_rate = [data_rate]
if not isinstance(datatype, list): datatype = [datatype]
# the probes will need to be strings beyond this point
if isinstance(probe, list):
probe = [str(p) for p in probe]
for probe_id in probe:
for datatype_id in datatype:
for data_rate_id in data_rate:
if datatype_id == 'electronenergy':
e_spin_avg_var = mms_eis_spin_avg(probe=probe_id, species='electron', datatype=datatype_id, data_rate=data_rate_id, suffix=suffix)
# create non-spin averaged omni-directional spectra
e_omni_spectra = mms_eis_omni(probe_id, species='electron', data_rate=data_rate_id, datatype=datatype_id, suffix=suffix)
# create spin averaged omni-directional spectra
e_omni_spectra_spin = mms_eis_omni(probe_id, species='electron', data_rate=data_rate_id, datatype=datatype_id, suffix=suffix+'_spin')
# add the vars to the output
if e_spin_avg_var is not None:
for tvar in e_spin_avg_var:
tvars.append(tvar)
if e_omni_spectra is not None:
tvars.append(e_omni_spectra)
if e_omni_spectra_spin is not None:
tvars.append(e_omni_spectra_spin)
elif datatype_id == 'extof':
p_spin_avg_var = mms_eis_spin_avg(probe=probe_id, species='proton', datatype=datatype_id, data_rate=data_rate_id, suffix=suffix)
o_spin_avg_var = mms_eis_spin_avg(probe=probe_id, species='oxygen', datatype=datatype_id, data_rate=data_rate_id, suffix=suffix)
a_spin_avg_var = mms_eis_spin_avg(probe=probe_id, species='alpha', datatype=datatype_id, data_rate=data_rate_id, suffix=suffix)
# create non-spin averaged omni-directional spectra
p_omni_spectra = mms_eis_omni(probe_id, species='proton', data_rate=data_rate_id, datatype=datatype_id, suffix=suffix)
o_omni_spectra = mms_eis_omni(probe_id, species='oxygen', data_rate=data_rate_id, datatype=datatype_id, suffix=suffix)
a_omni_spectra = mms_eis_omni(probe_id, species='alpha', data_rate=data_rate_id, datatype=datatype_id, suffix=suffix)
# create spin averaged omni-directional spectra
p_omni_spectra_spin = mms_eis_omni(probe_id, species='proton', data_rate=data_rate_id, datatype=datatype_id, suffix=suffix+'_spin')
o_omni_spectra_spin = mms_eis_omni(probe_id, species='oxygen', data_rate=data_rate_id, datatype=datatype_id, suffix=suffix+'_spin')
a_omni_spectra_spin = mms_eis_omni(probe_id, species='alpha', data_rate=data_rate_id, datatype=datatype_id, suffix=suffix+'_spin')
# add the vars to the output
if p_spin_avg_var is not None:
for tvar in p_spin_avg_var:
tvars.append(tvar)
if o_spin_avg_var is not None:
for tvar in o_spin_avg_var:
tvars.append(tvar)
if a_spin_avg_var is not None:
for tvar in a_spin_avg_var:
tvars.append(tvar)
if p_omni_spectra is not None:
tvars.append(p_omni_spectra)
if o_omni_spectra is not None:
tvars.append(o_omni_spectra)
if a_omni_spectra is not None:
tvars.append(a_omni_spectra)
if p_omni_spectra_spin is not None:
tvars.append(p_omni_spectra_spin)
if o_omni_spectra_spin is not None:
tvars.append(o_omni_spectra_spin)
if a_omni_spectra_spin is not None:
tvars.append(a_omni_spectra_spin)
elif datatype_id == 'phxtof':
p_spin_avg_var = mms_eis_spin_avg(probe=probe_id, species='proton', datatype=datatype_id, data_rate=data_rate_id, suffix=suffix)
o_spin_avg_var = mms_eis_spin_avg(probe=probe_id, species='oxygen', datatype=datatype_id, data_rate=data_rate_id, suffix=suffix)
# create non-spin averaged omni-directional spectra
p_omni_spectra = mms_eis_omni(probe_id, species='proton', data_rate=data_rate_id, datatype=datatype_id, suffix=suffix)
o_omni_spectra = mms_eis_omni(probe_id, species='oxygen', data_rate=data_rate_id, datatype=datatype_id, suffix=suffix)
# create spin averaged omni-directional spectra
p_omni_spectra_spin = mms_eis_omni(probe_id, species='proton', data_rate=data_rate_id, datatype=datatype_id, suffix=suffix+'_spin')
o_omni_spectra_spin = mms_eis_omni(probe_id, species='oxygen', data_rate=data_rate_id, datatype=datatype_id, suffix=suffix+'_spin')
# add the vars to the output
if p_spin_avg_var is not None:
for tvar in p_spin_avg_var:
tvars.append(tvar)
if o_spin_avg_var is not None:
for tvar in o_spin_avg_var:
tvars.append(tvar)
if p_omni_spectra is not None:
tvars.append(p_omni_spectra)
if o_omni_spectra is not None:
tvars.append(o_omni_spectra)
if p_omni_spectra_spin is not None:
tvars.append(p_omni_spectra_spin)
if o_omni_spectra_spin is not None:
tvars.append(o_omni_spectra_spin)
mms_eis_set_metadata(tnames(tvars), data_rate=data_rate_id, datatype=datatype_id, suffix=suffix)
return tnames(tvars)
@print_vars
def mms_load_edi(trange=['2016-10-16', '2016-10-17'], probe='1', data_rate='srvy', level='l2', datatype='efield',
varformat=None, varnames=[], get_support_data=False, suffix='', time_clip=False, no_update=False,
available=False, notplot=False, latest_version=False, major_version=False, min_version=None, cdf_version=None,
spdf=False, always_prompt=False):
"""
This function loads EDI data into tplot variables
Parameters:
trange : list of str
time range of interest [starttime, endtime] with the format
['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
probe : str or list of str
list of probes, valid values for MMS probes are ['1','2','3','4'].
data_rate : str or list of str
instrument data rates for EDI include ['brst', 'fast', 'slow', 'srvy']. The
default is 'srvy'.
level : str
indicates level of data processing. The default, if no level is specified, is 'l2'.
datatype : str or list of str
Valid datatypes for EDI are: ['efield', 'amb']; default is 'efield'
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot. By default, only loads in data with a
"VAR_TYPE" attribute of "data".
time_clip: bool
Data will be clipped to the exact trange specified by the trange keyword.
varformat: str
The file variable formats to load into tplot. Wildcard character
"*" is accepted. By default, all variables are loaded in.
varnames: list of str
List of variable names to load (if not specified,
all data variables are loaded)
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
notplot: bool
If True, then data are returned in a hash table instead of
being stored in tplot variables (useful for debugging, and
access to multi-dimensional data products)
available: bool
If True, simply return the available data files (without downloading)
for the requested parameters
no_update: bool
Set this flag to preserve the original data; if not set and newer
data is found, the existing data will be overwritten.
cdf_version: str
Specify a specific CDF version # to load (e.g., cdf_version='4.3.0')
min_version: str
Specify a minimum CDF version # to load
latest_version: bool
Only grab the latest CDF version in the requested time interval
major_version: bool
Only open the latest major CDF version (e.g., X in vX.Y.Z) in the requested time interval
always_prompt: bool
Set this keyword to always prompt for the user's username and password;
useful if you accidentally save an incorrect password, or if your SDC password has changed
spdf: bool
If True, download the data from the SPDF instead of the SDC
Returns:
List of tplot variables created.
"""
tvars = mms_load_data(trange=trange, notplot=notplot, probe=probe, data_rate=data_rate, level=level, instrument='edi',
datatype=datatype, varformat=varformat, varnames=varnames, get_support_data=get_support_data, suffix=suffix, time_clip=time_clip,
no_update=no_update, available=available, latest_version=latest_version, major_version=major_version,
min_version=min_version, cdf_version=cdf_version, spdf=spdf, always_prompt=always_prompt)
if tvars is None or available or notplot or CONFIG['download_only']:
return tvars
mms_edi_set_metadata(probe, data_rate, level, suffix=suffix)
return tvars
@print_vars
def mms_load_edp(trange=['2015-10-16', '2015-10-17'], probe='1', data_rate='fast', level='l2', datatype='dce',
varformat=None, varnames=[], get_support_data=False, suffix='', time_clip=True, no_update=False,
available=False, notplot=False, latest_version=False, major_version=False, min_version=None, cdf_version=None,
spdf=False, always_prompt=False):
"""
This function loads EDP data into tplot variables
Parameters:
trange : list of str
time range of interest [starttime, endtime] with the format
['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
probe : str or list of str
list of probes, valid values for MMS probes are ['1','2','3','4'].
data_rate : str or list of str
instrument data rates for EDP include ['brst', 'fast', 'slow', 'srvy']. The
default is 'fast'.
level : str
indicates level of data processing. The default, if no level is specified, is 'l2'.
datatype : str or list of str
Valid datatypes for EDP are: ['dce', 'dcv', 'ace', 'hmfe']; default is 'dce'
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot. By default, only loads in data with a
"VAR_TYPE" attribute of "data".
time_clip: bool
Data will be clipped to the exact trange specified by the trange keyword.
varformat: str
The file variable formats to load into tplot. Wildcard character
"*" is accepted. By default, all variables are loaded in.
varnames: list of str
List of variable names to load (if not specified,
all data variables are loaded)
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
notplot: bool
If True, then data are returned in a hash table instead of
being stored in tplot variables (useful for debugging, and
access to multi-dimensional data products)
available: bool
If True, simply return the available data files (without downloading)
for the requested parameters
no_update: bool
Set this flag to preserve the original data; if not set and newer
data is found, the existing data will be overwritten.
cdf_version: str
Specify a specific CDF version # to load (e.g., cdf_version='4.3.0')
min_version: str
Specify a minimum CDF version # to load
latest_version: bool
Only grab the latest CDF version in the requested time interval
major_version: bool
Only open the latest major CDF version (e.g., X in vX.Y.Z) in the requested time interval
always_prompt: bool
Set this keyword to always prompt for the user's username and password;
useful if you accidentally save an incorrect password, or if your SDC password has changed
spdf: bool
If True, download the data from the SPDF instead of the SDC
Returns:
List of tplot variables created.
"""
tvars = mms_load_data(trange=trange, notplot=notplot, probe=probe, data_rate=data_rate, level=level, instrument='edp',
datatype=datatype, varformat=varformat, varnames=varnames, get_support_data=get_support_data, suffix=suffix,
time_clip=time_clip, no_update=no_update, available=available, latest_version=latest_version,
major_version=major_version, min_version=min_version, cdf_version=cdf_version, spdf=spdf,
always_prompt=always_prompt)
if tvars is None or available or notplot or CONFIG['download_only']:
return tvars
mms_edp_set_metadata(probe, data_rate, level, suffix=suffix)
return tvars
@print_vars
def mms_load_dsp(trange=['2015-10-16', '2015-10-17'], probe='1', data_rate='srvy',
level='l2', datatype='bpsd', varformat=None, varnames=[], suffix='', get_support_data=False,
time_clip=False, no_update=False, available=False, notplot=False, latest_version=False,
major_version=False, min_version=None, cdf_version=None, spdf=False, always_prompt=False):
"""
This function loads DSP data into tplot variables
Parameters:
trange : list of str
time range of interest [starttime, endtime] with the format
['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
probe : str or list of str
list of probes, valid values for MMS probes are ['1','2','3','4'].
data_rate : str or list of str
instrument data rates for DSP include ['fast', 'slow', 'srvy']. The
default is 'srvy'.
level : str
indicates level of data processing. The default, if no level is specified, is 'l2'.
datatype : str or list of str
Valid datatypes for DSP are: ['epsd', 'bpsd', 'swd']; default is 'bpsd'
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot. By default, only loads in data with a
"VAR_TYPE" attribute of "data".
time_clip: bool
Data will be clipped to the exact trange specified by the trange keyword.
varformat: str
The file variable formats to load into tplot. Wildcard character
"*" is accepted. By default, all variables are loaded in.
varnames: list of str
List of variable names to load (if not specified,
all data variables are loaded)
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
notplot: bool
If True, then data are returned in a hash table instead of
being stored in tplot variables (useful for debugging, and
access to multi-dimensional data products)
available: bool
If True, simply return the available data files (without downloading)
for the requested parameters
no_update: bool
Set this flag to preserve the original data; if not set and newer
data is found, the existing data will be overwritten.
cdf_version: str
Specify a specific CDF version # to load (e.g., cdf_version='4.3.0')
min_version: str
Specify a minimum CDF version # to load
latest_version: bool
Only grab the latest CDF version in the requested time interval
major_version: bool
Only open the latest major CDF version (e.g., X in vX.Y.Z) in the requested time interval
always_prompt: bool
Set this keyword to always prompt for the user's username and password;
useful if you accidentally save an incorrect password, or if your SDC password has changed
spdf: bool
If True, download the data from the SPDF instead of the SDC
Returns:
List of tplot variables created.
"""
tvars = mms_load_data(trange=trange, notplot=notplot, probe=probe, data_rate=data_rate, level=level, instrument='dsp',
datatype=datatype, varformat=varformat, varnames=varnames, suffix=suffix, get_support_data=get_support_data, time_clip=time_clip,
no_update=no_update, available=available, latest_version=latest_version, major_version=major_version,
min_version=min_version, cdf_version=cdf_version, spdf=spdf, always_prompt=always_prompt)
if tvars is None or available or notplot or CONFIG['download_only']:
return tvars
mms_dsp_set_metadata(probe, data_rate, level, suffix=suffix)
return tvars
@print_vars
def mms_load_aspoc(trange=['2015-10-16', '2015-10-17'], probe='1', data_rate='srvy',
level='l2', datatype='', varformat=None, varnames=[], get_support_data=False, suffix='', time_clip=False, no_update=False,
available=False, notplot=False, latest_version=False, major_version=False, min_version=None, cdf_version=None,
spdf=False, always_prompt=False):
"""
This function loads ASPOC data into tplot variables
Parameters:
trange : list of str
time range of interest [starttime, endtime] with the format
['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
probe : str or list of str
list of probes, valid values for MMS probes are ['1','2','3','4'].
data_rate : str or list of str
instrument data rates for ASPOC include 'srvy', 'sitl'. The
default is 'srvy'.
level : str
indicates level of data processing. The default, if no level is specified, is 'l2'.
datatype : str or list of str
Valid datatypes for ASPOC are: ['asp1', 'asp2', 'aspoc']; default is 'aspoc'
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot. By default, only loads in data with a
"VAR_TYPE" attribute of "data".
time_clip: bool
Data will be clipped to the exact trange specified by the trange keyword.
varformat: str
The file variable formats to load into tplot. Wildcard character
"*" is accepted. By default, all variables are loaded in.
varnames: list of str
List of variable names to load (if not specified,
all data variables are loaded)
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
notplot: bool
If True, then data are returned in a hash table instead of
being stored in tplot variables (useful for debugging, and
access to multi-dimensional data products)
available: bool
If True, simply return the available data files (without downloading)
for the requested parameters
no_update: bool
Set this flag to preserve the original data; if not set and newer
data is found, the existing data will be overwritten.
cdf_version: str
Specify a specific CDF version # to load (e.g., cdf_version='4.3.0')
min_version: str
Specify a minimum CDF version # to load
latest_version: bool
Only grab the latest CDF version in the requested time interval
major_version: bool
Only open the latest major CDF version (e.g., X in vX.Y.Z) in the requested time interval
always_prompt: bool
Set this keyword to always prompt for the user's username and password;
useful if you accidentally save an incorrect password, or if your SDC password has changed
spdf: bool
If True, download the data from the SPDF instead of the SDC
Returns:
List of tplot variables created.
"""
if suffix == '':
suffix = '_' + level
else:
suffix = '_' + level + suffix
tvars = mms_load_data(trange=trange, notplot=notplot, probe=probe, data_rate=data_rate, level=level, instrument='aspoc',
datatype=datatype, varformat=varformat, varnames=varnames, get_support_data=get_support_data, suffix=suffix,
time_clip=time_clip, no_update=no_update, available=available, latest_version=latest_version,
major_version=major_version, min_version=min_version, cdf_version=cdf_version, spdf=spdf,
always_prompt=always_prompt)
return tvars
@print_vars
def mms_load_fsm(trange=['2015-10-16', '2015-10-17'], probe='1', data_rate='brst',
level='l3', datatype='8khz', get_support_data=False, time_clip=False, no_update=False,
available=False, varformat=None, varnames=[], notplot=False, suffix='', latest_version=False,
major_version=False, min_version=None, cdf_version=None, spdf=False, always_prompt=False):
"""
This function loads FSM data into tplot variables
Parameters:
trange : list of str
time range of interest [starttime, endtime] with the format
['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
probe : str or list of str
list of probes, valid values for MMS probes are ['1','2','3','4'].
data_rate : str or list of str
the current instrument data rate for FSM is 'brst'
level : str
indicates level of data processing. The default, if no level is specified, is 'l3'.
datatype : str or list of str
Valid datatype for FSM is: 8khz
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot. By default, only loads in data with a
"VAR_TYPE" attribute of "data".
time_clip: bool
Data will be clipped to the exact trange specified by the trange keyword.
varformat: str
The file variable formats to load into tplot. Wildcard character
"*" is accepted. By default, all variables are loaded in.
varnames: list of str
List of variable names to load (if not specified,
all data variables are loaded)
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
notplot: bool
If True, then data are returned in a hash table instead of
being stored in tplot variables (useful for debugging, and
access to multi-dimensional data products)
available: bool
If True, simply return the available data files (without downloading)
for the requested parameters
no_update: bool
Set this flag to preserve the original data; if not set and newer
data is found, the existing data will be overwritten.
cdf_version: str
Specify a specific CDF version # to load (e.g., cdf_version='4.3.0')
min_version: str
Specify a minimum CDF version # to load
latest_version: bool
Only grab the latest CDF version in the requested time interval
major_version: bool
Only open the latest major CDF version (e.g., X in vX.Y.Z) in the requested time interval
always_prompt: bool
Set this keyword to always prompt for the user's username and password;
useful if you accidentally save an incorrect password, or if your SDC password has changed
spdf: bool
If True, download the data from the SPDF instead of the SDC
Returns:
List of tplot variables created.
"""
tvars = mms_load_data(trange=trange, notplot=notplot, varformat=varformat, probe=probe, data_rate=data_rate,
level=level, instrument='fsm', datatype=datatype, get_support_data=get_support_data, time_clip=time_clip,
no_update=no_update, available=available, suffix=suffix, latest_version=latest_version, varnames=varnames,
major_version=major_version, min_version=min_version, cdf_version=cdf_version, spdf=spdf, always_prompt=always_prompt)
return tvars
'''
the following wrappers allow users to import the load routines using
the syntax:
>>> from pyspedas.mms import fgm
>>> fgm_data = fgm(...)
and/or
>>> import pyspedas
>>> fgm_data = pyspedas.mms.fgm(...)
'''
@wraps(mms_load_state)
def state(*args, **kwargs):
return mms_load_state(*args, **kwargs)
@wraps(mms_load_fgm)
def fgm(*args, **kwargs):
return mms_load_fgm(*args, **kwargs)
@wraps(mms_load_scm)
def scm(*args, **kwargs):
return mms_load_scm(*args, **kwargs)
@wraps(mms_load_fsm)
def fsm(*args, **kwargs):
return mms_load_fsm(*args, **kwargs)
@wraps(mms_load_edp)
def edp(*args, **kwargs):
return mms_load_edp(*args, **kwargs)
@wraps(mms_load_edi)
def edi(*args, **kwargs):
return mms_load_edi(*args, **kwargs)
@wraps(mms_load_fpi)
def fpi(*args, **kwargs):
return mms_load_fpi(*args, **kwargs)
@wraps(mms_load_hpca)
def hpca(*args, **kwargs):
return mms_load_hpca(*args, **kwargs)
@wraps(mms_load_eis)
def eis(*args, **kwargs):
return mms_load_eis(*args, **kwargs)
@wraps(mms_load_feeps)
def feeps(*args, **kwargs):
return mms_load_feeps(*args, **kwargs)
@wraps(mms_load_aspoc)
def aspoc(*args, **kwargs):
return mms_load_aspoc(*args, **kwargs)
@wraps(mms_load_mec)
def mec(*args, **kwargs):
return mms_load_mec(*args, **kwargs)
@wraps(mms_load_dsp)
def dsp(*args, **kwargs):
return mms_load_dsp(*args, **kwargs)
@wraps(mms_curl)
def curlometer(*args, **kwargs):
return mms_curl(*args, **kwargs)
| 42.507023
| 171
| 0.645229
| 8,796
| 63,548
| 4.51694
| 0.041724
| 0.024364
| 0.014271
| 0.010521
| 0.8736
| 0.846543
| 0.837432
| 0.832222
| 0.821147
| 0.813949
| 0
| 0.007866
| 0.281818
| 63,548
| 1,494
| 172
| 42.535475
| 0.862683
| 0.550041
| 0
| 0.371134
| 0
| 0
| 0.03507
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074742
| false
| 0
| 0.072165
| 0.036082
| 0.25
| 0.043814
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1631cd2cb606a2379e7a111f471934645c5f7c19
| 11,205
|
py
|
Python
|
tests/components/hunterdouglas_powerview/test_config_flow.py
|
gdt/core
|
72b0eb719e646cd8983f30ff797e7cd1cbe911e9
|
[
"Apache-2.0"
] | 6
|
2017-11-15T09:56:41.000Z
|
2021-01-24T15:12:09.000Z
|
tests/components/hunterdouglas_powerview/test_config_flow.py
|
gdt/core
|
72b0eb719e646cd8983f30ff797e7cd1cbe911e9
|
[
"Apache-2.0"
] | 87
|
2020-07-15T13:43:35.000Z
|
2022-03-23T07:43:10.000Z
|
tests/components/hunterdouglas_powerview/test_config_flow.py
|
gdt/core
|
72b0eb719e646cd8983f30ff797e7cd1cbe911e9
|
[
"Apache-2.0"
] | 2
|
2021-11-19T23:20:40.000Z
|
2021-11-20T00:18:40.000Z
|
"""Test the Logitech Harmony Hub config flow."""
import asyncio
import json
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from homeassistant import config_entries
from homeassistant.components import dhcp, zeroconf
from homeassistant.components.hunterdouglas_powerview.const import DOMAIN
from tests.common import MockConfigEntry, load_fixture
HOMEKIT_DISCOVERY_INFO = zeroconf.ZeroconfServiceInfo(
name="Hunter Douglas Powerview Hub._hap._tcp.local.",
host="1.2.3.4",
properties={"id": "AA::BB::CC::DD::EE::FF"},
)
ZEROCONF_DISCOVERY_INFO = zeroconf.ZeroconfServiceInfo(
name="Hunter Douglas Powerview Hub._powerview._tcp.local.",
host="1.2.3.4",
)
DHCP_DISCOVERY_INFO = dhcp.DhcpServiceInfo(
hostname="Hunter Douglas Powerview Hub", ip="1.2.3.4"
)
DISCOVERY_DATA = [
(
config_entries.SOURCE_HOMEKIT,
HOMEKIT_DISCOVERY_INFO,
),
(
config_entries.SOURCE_DHCP,
DHCP_DISCOVERY_INFO,
),
(config_entries.SOURCE_ZEROCONF, ZEROCONF_DISCOVERY_INFO),
]
def _get_mock_powerview_userdata(userdata=None, get_resources=None):
mock_powerview_userdata = MagicMock()
if not userdata:
userdata = json.loads(load_fixture("hunterdouglas_powerview/userdata.json"))
if get_resources:
mock_powerview_userdata.get_resources = AsyncMock(side_effect=get_resources)
else:
mock_powerview_userdata.get_resources = AsyncMock(return_value=userdata)
return mock_powerview_userdata
def _get_mock_powerview_legacy_userdata(userdata=None, get_resources=None):
mock_powerview_userdata_legacy = MagicMock()
if not userdata:
userdata = json.loads(load_fixture("hunterdouglas_powerview/userdata_v1.json"))
if get_resources:
mock_powerview_userdata_legacy.get_resources = AsyncMock(
side_effect=get_resources
)
else:
mock_powerview_userdata_legacy.get_resources = AsyncMock(return_value=userdata)
return mock_powerview_userdata_legacy
def _get_mock_powerview_fwversion(fwversion=None, get_resources=None):
mock_powerview_fwversion = MagicMock()
if not fwversion:
fwversion = json.loads(load_fixture("hunterdouglas_powerview/fwversion.json"))
if get_resources:
mock_powerview_fwversion.get_resources = AsyncMock(side_effect=get_resources)
else:
mock_powerview_fwversion.get_resources = AsyncMock(return_value=fwversion)
return mock_powerview_fwversion
async def test_user_form(hass):
"""Test we get the user form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
mock_powerview_userdata = _get_mock_powerview_userdata()
with patch(
"homeassistant.components.hunterdouglas_powerview.UserData",
return_value=mock_powerview_userdata,
), patch(
"homeassistant.components.hunterdouglas_powerview.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "1.2.3.4"},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "AlexanderHD"
assert result2["data"] == {
"host": "1.2.3.4",
}
assert len(mock_setup_entry.mock_calls) == 1
result3 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result3["type"] == "form"
assert result3["errors"] == {}
result4 = await hass.config_entries.flow.async_configure(
result3["flow_id"],
{"host": "1.2.3.4"},
)
assert result4["type"] == "abort"
async def test_user_form_legacy(hass):
"""Test we get the user form with a legacy device."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
mock_powerview_userdata = _get_mock_powerview_legacy_userdata()
mock_powerview_fwversion = _get_mock_powerview_fwversion()
with patch(
"homeassistant.components.hunterdouglas_powerview.UserData",
return_value=mock_powerview_userdata,
), patch(
"homeassistant.components.hunterdouglas_powerview.ApiEntryPoint",
return_value=mock_powerview_fwversion,
), patch(
"homeassistant.components.hunterdouglas_powerview.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "1.2.3.4"},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "PowerView Hub Gen 1"
assert result2["data"] == {
"host": "1.2.3.4",
}
assert len(mock_setup_entry.mock_calls) == 1
result3 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result3["type"] == "form"
assert result3["errors"] == {}
result4 = await hass.config_entries.flow.async_configure(
result3["flow_id"],
{"host": "1.2.3.4"},
)
assert result4["type"] == "abort"
@pytest.mark.parametrize("source, discovery_info", DISCOVERY_DATA)
async def test_form_homekit_and_dhcp_cannot_connect(hass, source, discovery_info):
"""Test we get the form with homekit and dhcp source."""
ignored_config_entry = MockConfigEntry(
domain=DOMAIN, data={}, source=config_entries.SOURCE_IGNORE
)
ignored_config_entry.add_to_hass(hass)
mock_powerview_userdata = _get_mock_powerview_userdata(
get_resources=asyncio.TimeoutError
)
with patch(
"homeassistant.components.hunterdouglas_powerview.UserData",
return_value=mock_powerview_userdata,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": source},
data=discovery_info,
)
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
@pytest.mark.parametrize("source, discovery_info", DISCOVERY_DATA)
async def test_form_homekit_and_dhcp(hass, source, discovery_info):
"""Test we get the form with homekit and dhcp source."""
ignored_config_entry = MockConfigEntry(
domain=DOMAIN, data={}, source=config_entries.SOURCE_IGNORE
)
ignored_config_entry.add_to_hass(hass)
mock_powerview_userdata = _get_mock_powerview_userdata()
with patch(
"homeassistant.components.hunterdouglas_powerview.UserData",
return_value=mock_powerview_userdata,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": source},
data=discovery_info,
)
assert result["type"] == "form"
assert result["step_id"] == "link"
assert result["errors"] is None
assert result["description_placeholders"] == {
"host": "1.2.3.4",
"name": "Hunter Douglas Powerview Hub",
}
with patch(
"homeassistant.components.hunterdouglas_powerview.UserData",
return_value=mock_powerview_userdata,
), patch(
"homeassistant.components.hunterdouglas_powerview.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Hunter Douglas Powerview Hub"
assert result2["data"] == {"host": "1.2.3.4"}
assert result2["result"].unique_id == "ABC123"
assert len(mock_setup_entry.mock_calls) == 1
result3 = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": source},
data=discovery_info,
)
assert result3["type"] == "abort"
async def test_discovered_by_homekit_and_dhcp(hass):
"""Test we get the form with homekit and abort for dhcp source when we get both."""
mock_powerview_userdata = _get_mock_powerview_userdata()
with patch(
"homeassistant.components.hunterdouglas_powerview.UserData",
return_value=mock_powerview_userdata,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HOMEKIT},
data=HOMEKIT_DISCOVERY_INFO,
)
assert result["type"] == "form"
assert result["step_id"] == "link"
with patch(
"homeassistant.components.hunterdouglas_powerview.UserData",
return_value=mock_powerview_userdata,
):
result2 = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=DHCP_DISCOVERY_INFO,
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_in_progress"
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_powerview_userdata = _get_mock_powerview_userdata(
get_resources=asyncio.TimeoutError
)
with patch(
"homeassistant.components.hunterdouglas_powerview.UserData",
return_value=mock_powerview_userdata,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "1.2.3.4"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_no_data(hass):
"""Test we handle no data being returned from the hub."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_powerview_userdata = _get_mock_powerview_userdata(userdata={"userData": {}})
with patch(
"homeassistant.components.hunterdouglas_powerview.UserData",
return_value=mock_powerview_userdata,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "1.2.3.4"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_form_unknown_exception(hass):
"""Test we handle unknown exception."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_powerview_userdata = _get_mock_powerview_userdata(userdata={"userData": {}})
with patch(
"homeassistant.components.hunterdouglas_powerview.UserData",
return_value=mock_powerview_userdata,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "1.2.3.4"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
| 33.249258 | 87 | 0.682195 | 1,279 | 11,205 | 5.689601 | 0.111806 | 0.107462 | 0.098117 | 0.060464 | 0.817232 | 0.785763 | 0.765838 | 0.741514 | 0.737392 | 0.698502 | 0 | 0.011574 | 0.205801 | 11,205 | 336 | 88 | 33.348214 | 0.806158 | 0.003748 | 0 | 0.604478 | 0 | 0 | 0.176553 | 0.094702 | 0 | 0 | 0 | 0 | 0.149254 | 1 | 0.011194 | false | 0 | 0.029851 | 0 | 0.052239 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 6 |
164bd05d32b19f9b79b2c0031c7c29bbae53e767 | 102 | py | Python | terrascript/arukas/r.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | ["BSD-2-Clause"] | 507 | 2017-07-26T02:58:38.000Z | 2022-01-21T12:35:13.000Z | terrascript/arukas/r.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | ["BSD-2-Clause"] | 135 | 2017-07-20T12:01:59.000Z | 2021-10-04T22:25:40.000Z | terrascript/arukas/r.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | ["BSD-2-Clause"] | 81 | 2018-02-20T17:55:28.000Z | 2022-01-31T07:08:40.000Z |
# terrascript/arukas/r.py
import terrascript
class arukas_container(terrascript.Resource):
pass
| 14.571429 | 45 | 0.794118 | 12 | 102 | 6.666667 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.127451 | 102 | 6 | 46 | 17 | 0.898876 | 0.22549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 |
0 | 6 |
1669a984c539884229b910755834681fe5388538 | 194 | py | Python | attendance_log/attendance_log/doctype/device_ip_details/device_ip_details.py | KaviyaPeriyasamy/attendance_log | fb9636067c54924f51ecd4df3cb66cbac829b6af | ["MIT"] | null | null | null | attendance_log/attendance_log/doctype/device_ip_details/device_ip_details.py | KaviyaPeriyasamy/attendance_log | fb9636067c54924f51ecd4df3cb66cbac829b6af | ["MIT"] | null | null | null | attendance_log/attendance_log/doctype/device_ip_details/device_ip_details.py | KaviyaPeriyasamy/attendance_log | fb9636067c54924f51ecd4df3cb66cbac829b6af | ["MIT"] | null | null | null |
# Copyright (c) 2021, alaa and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class DeviceIPDetails(Document):
pass
| 21.555556 | 49 | 0.793814 | 25 | 194 | 6.16 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023952 | 0.139175 | 194 | 8 | 50 | 24.25 | 0.898204 | 0.530928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 |
0 | 6 |
169981ceedf4a46996aee744b10545d746cd0b2f | 68,076 | py | Python | FasterRCNN_SE_Train/src/cntk/learners/__init__.py | springkim/FasterRCNN_SpringEdition | 7771dcf310cd80a195c44839c83e65adcd5df37d | ["MIT"] | 9 | 2017-10-03T14:02:29.000Z | 2019-05-31T01:07:40.000Z | FasterRCNN_SE_Train/src/cntk/learners/__init__.py | springkim/FasterRCNN_SpringEdition | 7771dcf310cd80a195c44839c83e65adcd5df37d | ["MIT"] | 2 | 2018-06-21T11:09:36.000Z | 2018-10-25T18:05:30.000Z | FasterRCNN_SE_Train/src/cntk/learners/__init__.py | springkim/FasterRCNN_SpringEdition | 7771dcf310cd80a195c44839c83e65adcd5df37d | ["MIT"] | 1 | 2019-03-22T02:16:17.000Z | 2019-03-22T02:16:17.000Z |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
'''
A learner tunes a set of parameters during the training process. One can use
different learners for different sets of parameters. Currently, CNTK supports
the following learning algorithms:
- :func:`AdaDelta <adadelta>`
- :func:`AdaGrad <adagrad>`
- :func:`FSAdaGrad <fsadagrad>`
- :func:`Adam <adam>`
- :func:`MomentumSGD <momentum_sgd>`
- :func:`Nesterov <nesterov>`
- :func:`RMSProp <rmsprop>`
- :func:`SGD <sgd>`
- :func:`Learner with a customized update function <universal>`
'''
from enum import Enum, unique
import warnings
import numpy as np
import cntk.internal.utils as utils
from .. import cntk_py, NDArrayView, asarray
from cntk.internal import typemap
from ..internal.swig_helper import map_if_possible
@unique
class UnitType(Enum):
'''
Deprecated:: 2.2
Indicates whether the values in the schedule are specified on the per-sample or
per-minibatch basis.
'''
sample = 'sample'
'''
Schedule contains per-sample values.
'''
minibatch = 'minibatch'
'''
Schedule contains per-minibatch values (and need to be re-scaled by the learner
using the actual minibatch size in samples).
'''
def default_unit_gain_value():
'''
Returns true if by default momentum is applied in the unit-gain fashion.
'''
return cntk_py.default_unit_gain_value()
def set_default_unit_gain_value(value):
'''
Sets globally default unit-gain flag value.
'''
cntk_py.set_default_unit_gain_value(value)
def default_use_mean_gradient_value():
'''
Returns true if by default input gradient to learner is averaged.
'''
return cntk_py.default_use_mean_gradient_value()
def set_default_use_mean_gradient_value(value):
'''
Sets globally default use_mean_gradient_value.
'''
cntk_py.set_default_use_mean_gradient_value(value)
# an internal method to verify that the learning rate schedule
# has a proper (per-sample or per-MB schedule) type and raise
# an exception otherwise
def _verify_learning_rate_type(learning_rate):
if not isinstance(learning_rate,
cntk_py.training_double_parameter_schedule):
raise ValueError('learning_rate type (%s) not supported. '
'learning_rate must be a training schedule '
'(output of learning_rate_schedule() function)'
% type(learning_rate))
# an internal method to verify that the momentum schedule
# has a proper (per-MB or time-constant schedule) type and raise
# an exception otherwise
def _verify_momentum_type(momentum):
if not isinstance(momentum,
cntk_py.training_double_parameter_schedule):
raise ValueError('momentum type (%s) not supported. '
'momentum must be a training schedule '
'(output of momentum_schedule() or '
'momentum_as_time_constant_schedule() function)'
% type(momentum))
class Learner(cntk_py.Learner):
'''
Abstraction for learning a subset of parameters of a learnable function using first order gradient values.
For example momentum, AdaGrad, RMSProp, etc. are different types of learners with their own algorithms for
learning parameter values using first order gradients.
To instantiate a concrete learner, use the factory methods in this module.
'''
def update(self, gradient_values, training_sample_count):
'''
Update the parameters associated with this learner.
Args:
gradient_values (dict): maps :class:`~cntk.variables.Parameter` to
a NumPy array containing the first order gradient values for the
Parameter w.r.t. the training objective.
training_sample_count (int): number of samples in the minibatch
Returns:
bool: `False` to indicate that learning has stopped for all of the parameters associated with this learner
'''
var_nd_map = {var: NDArrayView.from_data(val) for var, val in
gradient_values.items()}
return super(Learner, self)._update(var_nd_map, training_sample_count)
@property
@typemap
def parameters(self):
'''
The set of parameters associated with this learner.
'''
return super(Learner, self).parameters()
def reset_learning_rate(self, learning_rate):
'''
Resets the learning rate. The new schedule is adjusted to be relative
to the current number of elapsed samples/sweeps: the 0 offset in
the new schedule corresponds to the current value of elapsed samples/sweeps,
and it takes effect from the current position in the training process onwards.
Args:
learning_rate (output of :func:`learning_parameter_schedule`)
learning rate to reset to
'''
_verify_learning_rate_type(learning_rate)
if not learning_rate.is_minibatch_size_explicitly_specified:
#If the schedule minibatch size is not explicitly specified, the learner's specification will take over
if self.minibatch_size is not None and self.minibatch_size != self.ignored_minibatch_size:
learning_rate.minibatch_size = self.minibatch_size
return super(Learner, self).reset_learning_rate(learning_rate)
def learning_rate(self):
'''
Current learning rate schedule.
'''
return super(Learner, self).learning_rate()
IGNORE = Learner.ignored_minibatch_size
'''
Indicate that the minibatch size is ignored in learning's hyper-parameter schedule.
'''
class UserLearner(cntk_py.Learner):
'''
Base class of all user-defined learners. To implement your own learning
algorithm, derive from this class and override the :meth:`update`.
Certain optimizers (such as AdaGrad) require additional storage.
This can be allocated and initialized during construction.
'''
def __init__(self, parameters, lr_schedule, as_numpy=True):
super(UserLearner, self).__init__(parameters, lr_schedule)
self.as_numpy = as_numpy
self.__disown__()
def _update(self, gradient_values, training_sample_count, sweep_end):
'''
Update the parameters and related state associated with this learner.
Args:
gradient_values (dict): maps :class:`~cntk.variables.Parameter`
to a NumPy array containing the gradient for the Parameter w.r.t.
the training objective.
training_sample_count (int): number of samples in the minibatch
sweep_end (bool): if the data is fed by a conforming reader, this
indicates whether a full pass over the dataset has just occurred.
Returns:
bool: `False` to indicate that learning has stopped for all of the
parameters associated with this learner
'''
map_if_possible(gradient_values)
if self.as_numpy:
var_nd_map = {var: asarray(val)
for var, val in gradient_values.items()}
else:
var_nd_map = gradient_values
return self.update(var_nd_map, training_sample_count, sweep_end)
def update(self, gradient_values, training_sample_count, sweep_end):
'''
Update the parameters associated with this learner.
Args:
gradient_values (dict): maps :class:`~cntk.variables.Parameter` to
a NumPy array containing the first order gradient values for the
Parameter w.r.t. the training objective.
training_sample_count (int): number of samples in the minibatch
sweep_end (bool): if the data is fed by a conforming reader, this indicates
whether a full pass over the dataset has just occurred.
Returns:
bool: `False` to indicate that learning has stopped for all of the
parameters associated with this learner
'''
raise NotImplementedError('UserLearner.update must be overridden')
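# --- Editor's illustration (not part of the original module) ----------------
# A minimal sketch of the UserLearner contract documented above: derive from
# UserLearner and override update(). The class name and the plain-SGD update
# rule below are illustrative assumptions, not an official CNTK learner.
class _ExampleSgdUserLearner(UserLearner):
    def update(self, gradient_values, training_sample_count, sweep_end):
        # With the default as_numpy=True, each gradient arrives as a NumPy
        # array; apply a plain SGD step to the corresponding parameter value.
        eta = self.learning_rate() / training_sample_count
        for parameter, gradient in gradient_values.items():
            parameter.value = parameter.value - eta * gradient
        return True
# -----------------------------------------------------------------------------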
def _prepare_training_parameter_list(schedule):
if isinstance(schedule, list):
return [(1, v) if isinstance(v, (float, int)) else v for v in schedule]
else:
return schedule
@typemap
def training_parameter_schedule(schedule, unit=UnitType.minibatch, epoch_size=None):
'''
Deprecated:: 2.2
Create a training parameter schedule containing either per-sample (default)
or per-minibatch values.
Examples:
>>> # Use a fixed value 0.01 for all samples
>>> s = training_parameter_schedule(0.01)
>>> s[0], s[1]
(0.01, 0.01)
>>> # Use 0.01 for the first 1000 samples, then 0.001 for the remaining ones
>>> s = training_parameter_schedule([0.01, 0.001], epoch_size=1000)
>>> s[0], s[1], s[1000], s[1001]
(0.01, 0.01, 0.001, 0.001)
>>> # Use 0.1 for the first 12 epochs, then 0.01 for the next 15,
>>> # followed by 0.001 for the remaining ones, with a 100 samples in an epoch
>>> s = training_parameter_schedule([(12, 0.1), (15, 0.01), (1, 0.001)], epoch_size=100)
>>> s[0], s[1199], s[1200], s[2699], s[2700], s[5000]
(0.1, 0.1, 0.01, 0.01, 0.001, 0.001)
Args:
schedule (float or list): if float, is the parameter schedule to be used
for all samples. In case of list, the elements are used as the
values for ``epoch_size`` samples. If list contains pair, the second element is
used as a value for (``epoch_size`` x first element) samples
unit (:class:`UnitType`): one of two
* ``sample``: the returned schedule contains per-sample values
* ``minibatch``: the returned schedule contains per-minibatch values.
deprecated:: 2.2
Use the minibatch_size parameter to specify the reference minibatch size.
epoch_size (optional, int): number of samples as a scheduling unit.
Parameters in the schedule change their values every ``epoch_size``
samples. If no ``epoch_size`` is provided, this parameter is substituted
by the size of the full data sweep, in which case the scheduling unit is
the entire data sweep (as indicated by the MinibatchSource) and parameters
change their values on the sweep-by-sweep basis specified by the
``schedule``.
Returns:
training parameter schedule
See also:
:func:`learning_rate_schedule`
'''
if unit == UnitType.sample:
ref_minibatch_size = 1
else: # unit == UnitType.minibatch
ref_minibatch_size = cntk_py.training_double_parameter_schedule.ignored_minibatch_size
if isinstance(schedule, cntk_py.training_double_parameter_schedule):
schedule.is_minibatch_size_explicitly_specified = True  # legacy learning parameters always have the specification
return schedule
if isinstance(schedule, (int, float)):
if epoch_size is not None:
warnings.warn('When providing the schedule as a number, epoch_size is ignored', RuntimeWarning)
if UnitType(unit):
schedule = cntk_py.training_double_parameter_schedule(*[schedule, ref_minibatch_size])
schedule.is_minibatch_size_explicitly_specified = True  # legacy learning parameters always have the specification
return schedule
epoch_size = epoch_size if epoch_size is not None else cntk_py.training_double_parameter_schedule.full_data_sweep
if isinstance(schedule, list) and UnitType(unit):
schedule = _prepare_training_parameter_list(schedule)
args = [schedule, epoch_size, ref_minibatch_size]
schedule = cntk_py.training_double_parameter_schedule(*args)
schedule.is_minibatch_size_explicitly_specified = True  # legacy learning parameters always have the specification
return schedule
raise ValueError(
'schedule must be either a float or a list, not %s' % type(schedule))
@typemap
def learning_parameter_schedule(schedule, minibatch_size=None, epoch_size=None):
'''
Create a learning parameter schedule.
Args:
schedule (float or list): if float, is the parameter schedule to be used
for all samples. In case of a list [p_1, p_2, .., p_n], the i-th parameter p_i in the list is used as the
value from the (``epoch_size`` * (i-1) + 1)-th sample to the (``epoch_size`` * i)-th sample. If the list contains
pairs, i.e. [(num_epoch_1, p_1), (num_epoch_2, p_2), .., (num_epoch_n, p_n)], the i-th parameter p_i is used as the
value from the (``epoch_size`` * (num_epoch_1 + ... + num_epoch_(i-1)) + 1)-th sample to the
(``epoch_size`` * (num_epoch_1 + ... + num_epoch_i))-th sample.
minibatch_size (int): an integer to specify the minibatch size that the schedule is designed for.
CNTK will scale the schedule internally so as to simulate the behavior of the schedule as closely as possible
to match the designed effect. If it is not specified, CNTK will set it to the special value :attr:`IGNORE`.
epoch_size (optional, int): number of samples as a scheduling unit.
Parameters in the schedule change their values every ``epoch_size``
samples. If no ``epoch_size`` is provided, this parameter is substituted
by the size of the full data sweep, in which case the scheduling unit is
the entire data sweep (as indicated by the MinibatchSource) and parameters
change their values on the sweep-by-sweep basis specified by the
``schedule``.
Returns:
learning parameter schedule
'''
if isinstance(schedule, cntk_py.training_double_parameter_schedule):
return schedule
is_minibatch_size_explicitly_specified = True
if minibatch_size == None:
is_minibatch_size_explicitly_specified = False
minibatch_size = 0
if isinstance(schedule, (int, float)):
if epoch_size is not None:
warnings.warn('When providing the schedule as a number, epoch_size is ignored', RuntimeWarning)
schedule = cntk_py.training_double_parameter_schedule(*[schedule, minibatch_size])
schedule.is_minibatch_size_explicitly_specified = is_minibatch_size_explicitly_specified
return schedule
epoch_size = epoch_size if epoch_size is not None else cntk_py.training_double_parameter_schedule.full_data_sweep
if isinstance(schedule, list):
schedule = _prepare_training_parameter_list(schedule)
args = [schedule, epoch_size, minibatch_size]
schedule = cntk_py.training_double_parameter_schedule(*args)
schedule.is_minibatch_size_explicitly_specified = is_minibatch_size_explicitly_specified
return schedule
raise ValueError(
'schedule must be either a float or a list, not %s' % type(schedule))
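# --- Editor's illustration (not part of the original module) ----------------
# A small sketch of the scheduling semantics documented above; the concrete
# rates, epoch_size and minibatch_size values are illustrative assumptions.
def _example_learning_parameter_schedule():
    # 0.1 for the first 10*100 samples, 0.01 for the next 5*100 samples,
    # then 0.001 for the rest, designed for a reference minibatch of 64.
    lr = learning_parameter_schedule([(10, 0.1), (5, 0.01), (1, 0.001)],
                                     minibatch_size=64, epoch_size=100)
    return lr[0], lr[999], lr[1000], lr[1499], lr[1500]
# -----------------------------------------------------------------------------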
@typemap
def learning_rate_schedule(lr, unit, epoch_size=None):
'''
Deprecated:: 2.2
Create a learning rate schedule (using the same semantics as
:func:`training_parameter_schedule`).
Args:
lr (float or list): see parameter ``schedule`` in
:func:`training_parameter_schedule`.
unit (:class:`UnitType`): see parameter
``unit`` in :func:`training_parameter_schedule`.
deprecated:: 2.2
Use the minibatch_size parameter to specify the reference minibatch size instead.
epoch_size (int): see parameter ``epoch_size`` in
:func:`training_parameter_schedule`.
Returns:
learning rate schedule
See also:
:func:`training_parameter_schedule`
'''
return training_parameter_schedule(lr, unit, epoch_size)
@typemap
def momentum_schedule(momentum, epoch_size=None, minibatch_size = None):
'''
Create a per-minibatch momentum schedule (using the same semantics as
:func:`training_parameter_schedule` with the `unit=UnitType.minibatch`).
Args:
momentum (float or list): see parameter ``schedule`` in
:func:`training_parameter_schedule`.
epoch_size (int): see parameter ``epoch_size`` in
:func:`training_parameter_schedule`.
minibatch_size (int): an integer to specify the reference minibatch size that the schedule is designed for;
CNTK will scale the schedule internally so as to simulate the behavior of the schedule as much as possible
to match the designed effect.
If you want to provide momentum values in a minibatch-size
agnostic way, use :func:`momentum_as_time_constant_schedule`.
Examples:
>>> # Use a fixed momentum of 0.99 for all samples
>>> m = momentum_schedule(0.99)
>>> # Use the momentum value 0.99 for the first 1000 samples,
>>> # then 0.9 for the remaining ones
>>> m = momentum_schedule([0.99,0.9], 1000)
>>> m[0], m[999], m[1000], m[1001]
(0.99, 0.99, 0.9, 0.9)
>>> # Use the momentum value 0.99 for the first 999 samples,
>>> # then 0.88 for the next 888 samples, and 0.77 for the
>>> # the remaining ones
>>> m = momentum_schedule([(999,0.99),(888,0.88),(0, 0.77)])
>>> m[0], m[998], m[999], m[999+888-1], m[999+888]
(0.99, 0.99, 0.88, 0.88, 0.77)
Returns:
momentum schedule
'''
return learning_parameter_schedule(momentum, minibatch_size, epoch_size)
@typemap
def momentum_as_time_constant_schedule(momentum, epoch_size=None):
'''
Create a momentum schedule in a minibatch-size agnostic way
(using the same semantics as :func:`training_parameter_schedule`
with `unit=UnitType.sample`).
Deprecated:: 2.2
This is for legacy API.
In this legacy API,::
#assume the desired minibatch size invariant constant momentum rate is: momentum_rate
momentum_time_constant = -minibatch_size/np.log(momentum_rate)
momentum = momentum_as_time_constant_schedule(momentum_time_constant)
The equivalent code in the latest API, ::
momentum = momentum_schedule(momentum_rate, minibatch_size = minibatch_size)
Args:
momentum (float or list): see parameter ``schedule`` in
:func:`training_parameter_schedule`.
epoch_size (int): see parameter ``epoch_size`` in
:func:`training_parameter_schedule`.
minibatch_size (int): an integer to specify the reference minibatch size that the schedule is designed for;
CNTK will scale the schedule internally so as to simulate the behavior of the schedule as much as possible
to match the designed effect.
CNTK specifies momentum in a minibatch-size agnostic way as the time
constant (in samples) of a unit-gain 1st-order IIR filter. The value
specifies the number of samples after which a gradient has an effect of
1/e=37%.
If you want to specify the momentum per sample (or per minibatch),
use :func:`momentum_schedule`.
Examples:
>>> # Use a fixed momentum of 1100 for all samples
>>> m = momentum_as_time_constant_schedule(1100)
>>> # Use the time constant 1100 for the first 1000 samples,
>>> # then 1500 for the remaining ones
>>> m = momentum_as_time_constant_schedule([1100, 1500], 1000)
Returns:
momentum as time constant schedule
'''
if isinstance(momentum, (cntk_py.training_double_parameter_schedule)):
#the legacy momentum as time constant schedule: the ref minibatch size is always 1, so it is specified by definition
momentum.is_minibatch_size_explicitly_specified = True
return momentum
if isinstance(momentum, (int, float)):
if epoch_size is not None:
warnings.warn('When providing the schedule as a number, epoch_size is ignored', RuntimeWarning)
momentum = cntk_py.momentum_as_time_constant_schedule(momentum)
momentum.is_minibatch_size_explicitly_specified = True
return momentum
epoch_size = epoch_size if epoch_size is not None else cntk_py.training_double_parameter_schedule.full_data_sweep
if isinstance(momentum, list):
momentum = _prepare_training_parameter_list(momentum)
args = [momentum, epoch_size, 1] #momentum constant schedule's reference minibatch size is always per sample
momentum = cntk_py.training_double_parameter_schedule(*args)
momentum = cntk_py.momentum_as_time_constant_schedule(momentum)
momentum.is_minibatch_size_explicitly_specified = True
return momentum
raise ValueError(
'momentum must be either a float or a list, not %s' % type(momentum))
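# --- Editor's illustration (not part of the original module) ----------------
# Sketch of the legacy-vs-new equivalence spelled out above: a time constant
# tc corresponds to a per-minibatch momentum of exp(-minibatch_size / tc).
# The function name and the example numbers are illustrative assumptions.
def _example_momentum_equivalence(minibatch_size=64, time_constant=1100.0):
    legacy = momentum_as_time_constant_schedule(time_constant)
    modern = momentum_schedule(np.exp(-minibatch_size / time_constant),
                               minibatch_size=minibatch_size)
    return legacy, modern
# -----------------------------------------------------------------------------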
# TODO figure out how to pass infty to C++ in a portable way
def _infer_ref_minibatch_size_from_legacy_use_mean_gradient(ref_minibatch_size, use_mean_gradient):
if (ref_minibatch_size, use_mean_gradient) == (None, None):
#if ref_minibatch_size and the legacy use_mean_gradient are neither specified
return None
if ref_minibatch_size is not None:
if use_mean_gradient == True and ref_minibatch_size != cntk_py.Learner.ignored_minibatch_size:
warnings.warn(
'Learner reference minibatch size is specified while use_mean_gradient (deprecated option) is set to True. The learner reference minibatch size will override the mean gradient behavior.')
# if the ref_minibatch_size is specified, it overrides the legacy use_mean_gradient specification
return ref_minibatch_size
elif use_mean_gradient is not None:
# if the ref_minibatch_size is NOT specified, the legacy use_mean_gradient specification takes effect
return cntk_py.Learner.ignored_minibatch_size if use_mean_gradient is True else None
return None
def _infer_learning_parameter_schedule(number_or_schedule, ref_minibatch_size, epoch_size, use_mean_gradient=None):
#the input is a number, create a new training parameter
if isinstance(number_or_schedule, (int, float)) or \
(isinstance(number_or_schedule, list) and all(isinstance(r, (int, float, tuple)) for r in number_or_schedule)):
#default is per minibatch if the reference minibatch size is not specified.
ref_minibatch_size = 0 if ref_minibatch_size is None else ref_minibatch_size
schedule = learning_parameter_schedule(number_or_schedule, ref_minibatch_size, epoch_size)
schedule.is_minibatch_size_explicitly_specified = ref_minibatch_size is not None
return schedule
elif isinstance(number_or_schedule,
cntk_py.training_double_parameter_schedule):
if not number_or_schedule.is_minibatch_size_explicitly_specified and ref_minibatch_size is not None:
#If the schedule minibatch size is not explicitly specified, the learner's specification will take over
number_or_schedule.minibatch_size = ref_minibatch_size
#for backward compatibility: use_mean_gradient = True and lr.unit = UnitType.sample
#this combination was there to avoid the double-scaling of gradients when the gradients are already mean gradients
if use_mean_gradient and number_or_schedule.minibatch_size == 1:
#override the learning rate's minibatch_size to IGNORE
number_or_schedule.minibatch_size = IGNORE
warnings.warn('use_mean_gradient=True and learning_rate_schedule.unit=UnitType.sample is a deprecated combination. '
'Please use the new learner APIs: see https://www.cntk.ai/pythondocs/cntk.learners.html for details.')
return number_or_schedule
else:
raise ValueError('training parameter schedule type (%s) not supported. '
'training parameter schedule must be a training schedule '
% type(number_or_schedule))
def _infer_learning_rate_schedule_and_ref_minibatch_size(use_mean_gradient, ref_minibatch_size, schedule, epoch_size):
# a non-None ref_minibatch_size takes precedence; otherwise the legacy use_mean_gradient flag is consulted if it is True
ref_minibatch_size = _infer_ref_minibatch_size_from_legacy_use_mean_gradient(ref_minibatch_size, use_mean_gradient)
# if minibatch_size is not None, any schedule with an unspecified reference minibatch size will be overridden
schedule = _infer_learning_parameter_schedule(schedule, ref_minibatch_size, epoch_size, use_mean_gradient)
_verify_learning_rate_type(schedule)
return schedule, ref_minibatch_size
@typemap
def sgd(parameters, lr,
l1_regularization_weight=0.0, l2_regularization_weight=0.0,
gaussian_noise_injection_std_dev=0.0, gradient_clipping_threshold_per_sample=np.inf,
gradient_clipping_with_truncation=True, use_mean_gradient=None,
minibatch_size=None, epoch_size=None):
'''sgd(parameters, lr, l1_regularization_weight=0, l2_regularization_weight=0, gaussian_noise_injection_std_dev=0, gradient_clipping_threshold_per_sample=np.inf, gradient_clipping_with_truncation=True)
Creates an SGD learner instance to learn the parameters. See [1] for more
information on how to set the parameters.
Args:
parameters (list of parameters): list of network parameters to tune.
These can be obtained by the '.parameters()' method of the root
operator.
lr (float, list, output of :func:`learning_parameter_schedule`): a learning rate in float, or a learning rate schedule.
See also: :func:`learning_parameter_schedule`
l1_regularization_weight (float, optional): the L1 regularization weight per sample,
defaults to 0.0
l2_regularization_weight (float, optional): the L2 regularization weight per sample,
defaults to 0.0
gaussian_noise_injection_std_dev (float, optional): the standard deviation
of the Gaussian noise added to parameters post update, defaults to 0.0
gradient_clipping_threshold_per_sample (float, optional): clipping threshold
per sample, defaults to infinity
gradient_clipping_with_truncation (bool, default ``True``): use gradient clipping
with truncation
use_mean_gradient (bool, default ``False``): use averaged gradient as input to learner.
Defaults to the value returned by :func:`default_use_mean_gradient_value()`.
deprecated:: 2.2
Use the minibatch_size parameter to specify the reference minibatch size.
minibatch_size (int, default ``None``): the minibatch size that the learner's parameters are designed or pre-tuned for. This
size is usually set to the same size as the minibatch data source. CNTK will scale the parameters automatically
to enable an efficient model parameter update while approximating the behavior of the pre-designed and pre-tuned parameters.
If minibatch_size is not specified, CNTK will inherit the minibatch size from the learning rate schedule;
if the learning rate schedule does not specify it either, CNTK will set it to :attr:`IGNORE`. Setting minibatch_size to :attr:`IGNORE`
makes the learner apply the parameters as-is and prevents CNTK from performing any hyper-parameter scaling. See also: :func:`learning_parameter_schedule`
epoch_size (optional, int): number of samples as a scheduling unit for learning rate. See also: :func:`learning_parameter_schedule`
Returns:
:class:`~cntk.learners.Learner`: learner instance that can be passed to
the :class:`~cntk.train.trainer.Trainer`
See also:
[1] L. Bottou. `Stochastic Gradient Descent Tricks
<https://www.microsoft.com/en-us/research/publication/stochastic-gradient-tricks>`_. Neural
Networks: Tricks of the Trade: Springer, 2012.
'''
lr, minibatch_size = _infer_learning_rate_schedule_and_ref_minibatch_size(use_mean_gradient, minibatch_size, lr, epoch_size)
gaussian_noise_injection_std_dev = \
training_parameter_schedule(
gaussian_noise_injection_std_dev)
additional_options = cntk_py.AdditionalLearningOptions()
additional_options.l1_regularization_weight = l1_regularization_weight
additional_options.l2_regularization_weight = l2_regularization_weight
additional_options.gaussian_noise_injection_std_dev = gaussian_noise_injection_std_dev
additional_options.gradient_clipping_threshold_per_sample = gradient_clipping_threshold_per_sample
additional_options.gradient_clipping_with_truncation = gradient_clipping_with_truncation
if minibatch_size is not None:
additional_options.dict_options[cntk_py.Learner._MINIBATCH_SIZE] = cntk_py.SizeTWrapper(minibatch_size) #need this to make proper typed DictionaryValue
opt = cntk_py.sgd_learner(parameters, lr, additional_options)
opt.is_minibatch_size_explicitly_specified = minibatch_size is not None
return opt
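# --- Editor's illustration (not part of the original module) ----------------
# Minimal sketch of building an SGD learner for a model's parameters with the
# factory above; `model` and all hyper-parameter values are illustrative.
def _example_sgd_learner(model):
    lr = learning_parameter_schedule(0.01, minibatch_size=64)
    return sgd(model.parameters, lr,
               l2_regularization_weight=0.001,
               gradient_clipping_threshold_per_sample=5.0)
# -----------------------------------------------------------------------------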
@typemap
def momentum_sgd(parameters, lr, momentum, unit_gain=default_unit_gain_value(),
l1_regularization_weight=0.0, l2_regularization_weight=0.0,
gaussian_noise_injection_std_dev=0.0, gradient_clipping_threshold_per_sample=np.inf,
gradient_clipping_with_truncation=True, use_mean_gradient=None,
minibatch_size=None, epoch_size=None):
'''momentum_sgd(parameters, lr, momentum, unit_gain=default_unit_gain_value(), l1_regularization_weight=0.0, l2_regularization_weight=0, gaussian_noise_injection_std_dev=0, gradient_clipping_threshold_per_sample=np.inf, gradient_clipping_with_truncation=True)
Creates a Momentum SGD learner instance to learn the parameters.
Args:
parameters (list of parameters): list of network parameters to tune.
These can be obtained by the root operator's ``parameters``.
lr (float, list, output of :func:`learning_parameter_schedule`): a learning rate in float, or a learning rate schedule.
See also: :func:`learning_parameter_schedule`
momentum (float, list, output of :func:`momentum_schedule`): momentum schedule.
For additional information, please refer to the :cntkwiki:`this CNTK Wiki article <BrainScript-SGD-Block#converting-learning-rate-and-momentum-parameters-from-other-toolkits>`.
unit_gain: when ``True``, momentum is interpreted as a unit-gain filter. Defaults
to the value returned by :func:`default_unit_gain_value`.
l1_regularization_weight (float, optional): the L1 regularization weight per sample,
defaults to 0.0
l2_regularization_weight (float, optional): the L2 regularization weight per sample,
defaults to 0.0
gaussian_noise_injection_std_dev (float, optional): the standard deviation
of the Gaussian noise added to parameters post update, defaults to 0.0
gradient_clipping_threshold_per_sample (float, optional): clipping threshold
per sample, defaults to infinity
gradient_clipping_with_truncation (bool, default ``True``): use gradient clipping
with truncation
use_mean_gradient (bool, default ``False``): use averaged gradient as input to learner.
Defaults to the value returned by :func:`default_use_mean_gradient_value()`.
deprecated:: 2.2
Use the minibatch_size parameter to specify the reference minibatch size.
minibatch_size (int, default ``None``): the minibatch size that the learner's parameters are designed or pre-tuned for. This
size is usually set to the same size as the minibatch data source. CNTK will scale the parameters automatically
to enable an efficient model parameter update while approximating the behavior of the pre-designed and pre-tuned parameters.
If minibatch_size is not specified, CNTK will inherit the minibatch size from the learning rate schedule;
if the learning rate schedule does not specify it either, CNTK will set it to :attr:`IGNORE`. Setting minibatch_size to :attr:`IGNORE`
makes the learner apply the parameters as-is and prevents CNTK from performing any hyper-parameter scaling. See also: :func:`learning_parameter_schedule`
epoch_size (optional, int): number of samples as a scheduling unit for learning rate and momentum. See also: :func:`learning_parameter_schedule`
Returns:
:class:`~cntk.learners.Learner`: learner instance that can be passed to
the :class:`~cntk.train.trainer.Trainer`
'''
lr, minibatch_size = _infer_learning_rate_schedule_and_ref_minibatch_size(use_mean_gradient, minibatch_size, lr, epoch_size)
momentum = _infer_learning_parameter_schedule(momentum, minibatch_size, epoch_size)
_verify_momentum_type(momentum)
gaussian_noise_injection_std_dev = \
training_parameter_schedule(
gaussian_noise_injection_std_dev)
additional_options = cntk_py.AdditionalLearningOptions()
additional_options.l1_regularization_weight = l1_regularization_weight
additional_options.l2_regularization_weight = l2_regularization_weight
additional_options.gaussian_noise_injection_std_dev = gaussian_noise_injection_std_dev
additional_options.gradient_clipping_threshold_per_sample = gradient_clipping_threshold_per_sample
additional_options.gradient_clipping_with_truncation = gradient_clipping_with_truncation
if minibatch_size is not None:
additional_options.dict_options[cntk_py.Learner._MINIBATCH_SIZE] = cntk_py.SizeTWrapper(minibatch_size) #need this to make proper typed DictionaryValue
opt = cntk_py.momentum_sgd_learner(parameters, lr, momentum, unit_gain,
additional_options)
opt.is_minibatch_size_explicitly_specified = minibatch_size is not None
return opt
@typemap
def nesterov(parameters, lr, momentum, unit_gain=default_unit_gain_value(),
l1_regularization_weight=0.0, l2_regularization_weight=0.0,
gaussian_noise_injection_std_dev=0.0, gradient_clipping_threshold_per_sample=np.inf,
gradient_clipping_with_truncation=True, use_mean_gradient=None,
minibatch_size=None, epoch_size=None):
'''nesterov(parameters, lr, momentum, unit_gain=default_unit_gain_value(), l1_regularization_weight=0, l2_regularization_weight=0, gaussian_noise_injection_std_dev=0, gradient_clipping_threshold_per_sample=np.inf, gradient_clipping_with_truncation=True)
Creates a Nesterov SGD learner instance to learn the parameters. This was
originally proposed by Nesterov [1] in 1983 and then shown to work well in
a deep learning context by Sutskever, et al. [2].
Args:
parameters (list of parameters): list of network parameters to tune.
These can be obtained by the root operator's ``parameters``.
lr (float, list, output of :func:`learning_parameter_schedule`): a learning rate in float, or a learning rate schedule.
See also: :func:`learning_parameter_schedule`
momentum (float, list, output of :func:`momentum_schedule`): momentum schedule.
For additional information, please refer to the :cntkwiki:`this CNTK Wiki article <BrainScript-SGD-Block#converting-learning-rate-and-momentum-parameters-from-other-toolkits>`.
unit_gain: when ``True``, momentum is interpreted as a unit-gain filter. Defaults
to the value returned by :func:`default_unit_gain_value`.
l1_regularization_weight (float, optional): the L1 regularization weight per sample,
defaults to 0.0
l2_regularization_weight (float, optional): the L2 regularization weight per sample,
defaults to 0.0
gaussian_noise_injection_std_dev (float, optional): the standard deviation
of the Gaussian noise added to parameters post update, defaults to 0.0
gradient_clipping_threshold_per_sample (float, optional): clipping threshold
per sample, defaults to infinity
gradient_clipping_with_truncation (bool, default ``True``): use gradient clipping
with truncation
use_mean_gradient (bool, default ``False``): use averaged gradient as input to learner.
Defaults to the value returned by :func:`default_use_mean_gradient_value()`.
deprecated:: 2.2
Use the minibatch_size parameter to specify the reference minibatch size.
minibatch_size (int, default ``None``): the minibatch size that the learner's parameters are designed or pre-tuned for. This
size is usually set to the same size as the minibatch data source. CNTK will scale the parameters automatically
to enable an efficient model parameter update while approximating the behavior of the pre-designed and pre-tuned parameters.
If minibatch_size is not specified, CNTK will inherit the minibatch size from the learning rate schedule;
if the learning rate schedule does not specify it either, CNTK will set it to :attr:`IGNORE`. Setting minibatch_size to :attr:`IGNORE`
makes the learner apply the parameters as-is and prevents CNTK from performing any hyper-parameter scaling. See also: :func:`learning_parameter_schedule`
epoch_size (optional, int): number of samples as a scheduling unit for learning rate and momentum. See also: :func:`learning_parameter_schedule`
Returns:
:class:`~cntk.learners.Learner`: learner instance that can be passed to
the :class:`~cntk.train.trainer.Trainer`
See also:
[1] Y. Nesterov. A Method of Solving a Convex Programming Problem with Convergence Rate O(1/ sqrt(k)). Soviet Mathematics Doklady, 1983.
[2] I. Sutskever, J. Martens, G. Dahl, and G. Hinton. `On the
Importance of Initialization and Momentum in Deep Learning
<http://www.cs.toronto.edu/~fritz/absps/momentum.pdf>`_. Proceedings
of the 30th International Conference on Machine Learning, 2013.
'''
lr, minibatch_size = _infer_learning_rate_schedule_and_ref_minibatch_size(use_mean_gradient, minibatch_size, lr, epoch_size)
momentum = _infer_learning_parameter_schedule(momentum, minibatch_size, epoch_size)
_verify_momentum_type(momentum)
gaussian_noise_injection_std_dev = \
training_parameter_schedule(
gaussian_noise_injection_std_dev)
additional_options = cntk_py.AdditionalLearningOptions()
additional_options.l1_regularization_weight = l1_regularization_weight
additional_options.l2_regularization_weight = l2_regularization_weight
additional_options.gaussian_noise_injection_std_dev = gaussian_noise_injection_std_dev
additional_options.gradient_clipping_threshold_per_sample = gradient_clipping_threshold_per_sample
additional_options.gradient_clipping_with_truncation = gradient_clipping_with_truncation
if minibatch_size is not None:
additional_options.dict_options[cntk_py.Learner._MINIBATCH_SIZE] = cntk_py.SizeTWrapper(minibatch_size) #need this to make proper typed DictionaryValue
opt=cntk_py.nesterov_learner(parameters, lr, momentum, unit_gain,
additional_options)
opt.is_minibatch_size_explicitly_specified = minibatch_size is not None
return opt
@typemap
def adadelta(parameters, lr=learning_rate_schedule(1, UnitType.sample), rho=0.95, epsilon=1e-8,
l1_regularization_weight=0.0, l2_regularization_weight=0.0,
gaussian_noise_injection_std_dev=0.0, gradient_clipping_threshold_per_sample=np.inf,
gradient_clipping_with_truncation=True, use_mean_gradient=None,
minibatch_size=None, epoch_size=None):
'''adadelta(parameters, lr, rho, epsilon, l1_regularization_weight=0, l2_regularization_weight=0, gaussian_noise_injection_std_dev=0, gradient_clipping_threshold_per_sample=np.inf, gradient_clipping_with_truncation=True)
Creates an AdaDelta learner instance to learn the parameters. See [1] for
more information.
Args:
parameters (list of parameters): list of network parameters to tune.
These can be obtained by the root operator's ``parameters``.
lr (float, list, output of :func:`learning_parameter_schedule`): a learning rate in float, or a learning rate schedule.
See also: :func:`learning_parameter_schedule`
rho (float): exponential smooth factor for each minibatch.
epsilon (float): epsilon for sqrt.
l1_regularization_weight (float, optional): the L1 regularization weight per sample,
defaults to 0.0
l2_regularization_weight (float, optional): the L2 regularization weight per sample,
defaults to 0.0
gaussian_noise_injection_std_dev (float, optional): the standard deviation
of the Gaussian noise added to parameters post update, defaults to 0.0
gradient_clipping_threshold_per_sample (float, optional): clipping threshold
per sample, defaults to infinity
gradient_clipping_with_truncation (bool, default ``True``): use gradient clipping
with truncation
use_mean_gradient (bool, default ``False``): use averaged gradient as input to learner.
Defaults to the value returned by :func:`default_use_mean_gradient_value()`.
deprecated:: 2.2
Use the minibatch_size parameter to specify the reference minibatch size.
minibatch_size (int, default ``None``): the minibatch size that the learner's parameters are designed or pre-tuned for. This
size is usually set to the same size as the minibatch data source. CNTK will scale the parameters automatically
to enable an efficient model parameter update while approximating the behavior of the pre-designed and pre-tuned parameters.
If minibatch_size is not specified, CNTK will inherit the minibatch size from the learning rate schedule;
if the learning rate schedule does not specify it either, CNTK will set it to :attr:`IGNORE`. Setting minibatch_size to :attr:`IGNORE`
makes the learner apply the parameters as-is and prevents CNTK from performing any hyper-parameter scaling. See also: :func:`learning_parameter_schedule`
epoch_size (optional, int): number of samples as a scheduling unit for learning rate. See also: :func:`learning_parameter_schedule`
Returns:
:class:`~cntk.learners.Learner`: learner instance that can be passed to
the :class:`~cntk.train.trainer.Trainer`
See also
[1] Matthew D. Zeiler, `ADADELTA: An Adaptive Learning Rate Method
<https://arxiv.org/pdf/1212.5701.pdf>`_.
'''
gaussian_noise_injection_std_dev = \
training_parameter_schedule(
gaussian_noise_injection_std_dev)
lr, minibatch_size = _infer_learning_rate_schedule_and_ref_minibatch_size(use_mean_gradient, minibatch_size, lr, epoch_size)
additional_options = cntk_py.AdditionalLearningOptions()
additional_options.l1_regularization_weight = l1_regularization_weight
additional_options.l2_regularization_weight = l2_regularization_weight
additional_options.gaussian_noise_injection_std_dev = gaussian_noise_injection_std_dev
additional_options.gradient_clipping_threshold_per_sample = gradient_clipping_threshold_per_sample
additional_options.gradient_clipping_with_truncation = gradient_clipping_with_truncation
minibatch_size = _infer_ref_minibatch_size_from_legacy_use_mean_gradient(minibatch_size, use_mean_gradient)
if minibatch_size is not None:
additional_options.dict_options[cntk_py.Learner._MINIBATCH_SIZE] = cntk_py.SizeTWrapper(minibatch_size) #need this to make proper typed DictionaryValue
opt = cntk_py.ada_delta_learner(parameters, lr, rho, epsilon,
additional_options)
opt.is_minibatch_size_explicitly_specified = minibatch_size is not None
return opt
@typemap
def adagrad(parameters, lr, need_ave_multiplier=True,
l1_regularization_weight=0.0, l2_regularization_weight=0.0,
gaussian_noise_injection_std_dev=0.0, gradient_clipping_threshold_per_sample=np.inf,
gradient_clipping_with_truncation=True, use_mean_gradient=None,
minibatch_size=None, epoch_size=None):
'''adagrad(parameters, lr, need_ave_multiplier=True, l1_regularization_weight=0, l2_regularization_weight=0, gaussian_noise_injection_std_dev=0, gradient_clipping_threshold_per_sample=np.inf, gradient_clipping_with_truncation=True)
Creates an AdaGrad learner instance to learn the parameters. See [1] for
more information.
Args:
parameters (list of parameters): list of network parameters to tune.
These can be obtained by the root operator's ``parameters``.
lr (float, list, output of :func:`learning_parameter_schedule`): a learning rate in float, or a learning rate schedule.
See also: :func:`learning_parameter_schedule`
need_ave_multiplier (bool, default ``True``):
l1_regularization_weight (float, optional): the L1 regularization weight per sample,
defaults to 0.0
l2_regularization_weight (float, optional): the L2 regularization weight per sample,
defaults to 0.0
gaussian_noise_injection_std_dev (float, optional): the standard deviation
of the Gaussian noise added to parameters post update, defaults to 0.0
gradient_clipping_threshold_per_sample (float, optional): clipping threshold
per sample, defaults to infinity
gradient_clipping_with_truncation (bool, default ``True``): use gradient clipping
with truncation
use_mean_gradient (bool, default ``False``): use averaged gradient as input to learner.
Defaults to the value returned by :func:`default_use_mean_gradient_value()`.
deprecated:: 2.2
Use the minibatch_size parameter to specify the reference minibatch size.
minibatch_size (int, default ``None``): the minibatch size that the learner's parameters are designed or pre-tuned for. This
size is usually set to the same size as the minibatch data source. CNTK will scale the parameters automatically
to enable an efficient model parameter update while approximating the behavior of the pre-designed and pre-tuned parameters.
If minibatch_size is not specified, CNTK will inherit the minibatch size from the learning rate schedule;
if the learning rate schedule does not specify it either, CNTK will set it to :attr:`IGNORE`. Setting minibatch_size to :attr:`IGNORE`
makes the learner apply the parameters as-is and prevents CNTK from performing any hyper-parameter scaling. See also: :func:`learning_parameter_schedule`
epoch_size (optional, int): number of samples as a scheduling unit for learning rate. See also: :func:`learning_parameter_schedule`
Returns:
:class:`~cntk.learners.Learner`: learner instance that can be passed to
the :class:`~cntk.train.trainer.Trainer`
See also:
[1] J. Duchi, E. Hazan, and Y. Singer. `Adaptive Subgradient Methods
for Online Learning and Stochastic Optimization
<http://www.magicbroom.info/Papers/DuchiHaSi10.pdf>`_. The Journal of
Machine Learning Research, 2011.
'''
lr, minibatch_size = _infer_learning_rate_schedule_and_ref_minibatch_size(use_mean_gradient, minibatch_size, lr, epoch_size)
gaussian_noise_injection_std_dev = \
training_parameter_schedule(
gaussian_noise_injection_std_dev)
additional_options = cntk_py.AdditionalLearningOptions()
additional_options.l1_regularization_weight = l1_regularization_weight
additional_options.l2_regularization_weight = l2_regularization_weight
additional_options.gaussian_noise_injection_std_dev = gaussian_noise_injection_std_dev
additional_options.gradient_clipping_threshold_per_sample = gradient_clipping_threshold_per_sample
additional_options.gradient_clipping_with_truncation = gradient_clipping_with_truncation
minibatch_size = _infer_ref_minibatch_size_from_legacy_use_mean_gradient(minibatch_size, use_mean_gradient)
if minibatch_size is not None:
additional_options.dict_options[cntk_py.Learner._MINIBATCH_SIZE] = cntk_py.SizeTWrapper(minibatch_size) #need this to make proper typed DictionaryValue
opt = cntk_py.ada_grad_learner(parameters, lr, need_ave_multiplier,
additional_options)
opt.is_minibatch_size_explicitly_specified = minibatch_size is not None
return opt
@typemap
def fsadagrad(parameters, lr, momentum, unit_gain=default_unit_gain_value(),
variance_momentum=momentum_as_time_constant_schedule(720000),
l1_regularization_weight=0.0, l2_regularization_weight=0.0,
gaussian_noise_injection_std_dev=0.0, gradient_clipping_threshold_per_sample=np.inf,
gradient_clipping_with_truncation=True, use_mean_gradient=None,
minibatch_size=None, epoch_size=None):
'''fsadagrad(parameters, lr, momentum, unit_gain=default_unit_gain_value(), variance_momentum=momentum_as_time_constant_schedule(720000), l1_regularization_weight=0, l2_regularization_weight=0, gaussian_noise_injection_std_dev=0, gradient_clipping_threshold_per_sample=np.inf, gradient_clipping_with_truncation=True)
Creates an FSAdaGrad learner instance to learn the parameters.
Args:
parameters (list of parameters): list of network parameters to tune.
These can be obtained by the root operator's ``parameters``.
lr (float, list, output of :func:`learning_parameter_schedule`): a learning rate in float, or a learning rate schedule.
See also: :func:`learning_parameter_schedule`
momentum (float, list, output of :func:`momentum_schedule`): momentum schedule.
For additional information, please refer to the :cntkwiki:`this CNTK Wiki article <BrainScript-SGD-Block#converting-learning-rate-and-momentum-parameters-from-other-toolkits>`.
unit_gain: when ``True``, momentum is interpreted as a unit-gain filter. Defaults
to the value returned by :func:`default_unit_gain_value`.
variance_momentum (float, list, output of :func:`momentum_schedule` or :func:`momentum_as_time_constant_schedule`): variance momentum schedule. Defaults
to ``momentum_as_time_constant_schedule(720000)``.
l1_regularization_weight (float, optional): the L1 regularization weight per sample,
defaults to 0.0
l2_regularization_weight (float, optional): the L2 regularization weight per sample,
defaults to 0.0
gaussian_noise_injection_std_dev (float, optional): the standard deviation
of the Gaussian noise added to parameters post update, defaults to 0.0
gradient_clipping_threshold_per_sample (float, optional): clipping threshold
per sample, defaults to infinity
gradient_clipping_with_truncation (bool, default ``True``): use gradient clipping
with truncation
use_mean_gradient (bool, default ``False``): use averaged gradient as input to learner.
Defaults to the value returned by :func:`default_use_mean_gradient_value()`.
deprecated:: 2.2
Use the minibatch_size parameter to specify the reference minibatch size.
minibatch_size (int, default ``None``): the minibatch size that the learner's parameters are designed or pre-tuned for. This
size is usually set to the same size as the minibatch data source. CNTK will scale the parameters automatically
to enable an efficient model parameter update while approximating the behavior of the pre-designed and pre-tuned parameters.
If minibatch_size is not specified, CNTK will inherit the minibatch size from the learning rate schedule;
if the learning rate schedule does not specify it either, CNTK will set it to :attr:`IGNORE`. Setting minibatch_size to :attr:`IGNORE`
makes the learner apply the parameters as-is and prevents CNTK from performing any hyper-parameter scaling. See also: :func:`learning_parameter_schedule`
epoch_size (optional, int): number of samples as a scheduling unit for learning rate, momentum and variance_momentum. See also: :func:`learning_parameter_schedule`
Returns:
:class:`~cntk.learners.Learner`: learner instance that can be passed to
the :class:`~cntk.train.trainer.Trainer`
'''
lr, minibatch_size = _infer_learning_rate_schedule_and_ref_minibatch_size(use_mean_gradient, minibatch_size, lr, epoch_size)
momentum = _infer_learning_parameter_schedule(momentum, minibatch_size, epoch_size)
_verify_momentum_type(momentum)
variance_momentum = _infer_learning_parameter_schedule(variance_momentum, minibatch_size, epoch_size)
_verify_momentum_type(variance_momentum)
gaussian_noise_injection_std_dev = \
training_parameter_schedule(
gaussian_noise_injection_std_dev)
additional_options = cntk_py.AdditionalLearningOptions()
additional_options.l1_regularization_weight = l1_regularization_weight
additional_options.l2_regularization_weight = l2_regularization_weight
additional_options.gaussian_noise_injection_std_dev = gaussian_noise_injection_std_dev
additional_options.gradient_clipping_threshold_per_sample = gradient_clipping_threshold_per_sample
additional_options.gradient_clipping_with_truncation = gradient_clipping_with_truncation
minibatch_size = _infer_ref_minibatch_size_from_legacy_use_mean_gradient(minibatch_size, use_mean_gradient)
if minibatch_size is not None:
additional_options.dict_options[cntk_py.Learner._MINIBATCH_SIZE] = cntk_py.SizeTWrapper(minibatch_size) #need this to make proper typed DictionaryValue
opt = cntk_py.fsada_grad_learner(parameters, lr, momentum, unit_gain,
variance_momentum, additional_options)
opt.is_minibatch_size_explicitly_specified = minibatch_size is not None
return opt
@typemap
def adam(parameters, lr, momentum, unit_gain=default_unit_gain_value(),
variance_momentum=momentum_as_time_constant_schedule(720000),
l1_regularization_weight=0.0, l2_regularization_weight=0.0,
gaussian_noise_injection_std_dev=0.0, gradient_clipping_threshold_per_sample=np.inf,
gradient_clipping_with_truncation=True, use_mean_gradient=None, epsilon=1e-8, adamax=False,
minibatch_size=None, epoch_size=None):
'''adam(parameters, lr, momentum, unit_gain=default_unit_gain_value(), variance_momentum=momentum_as_time_constant_schedule(720000), l1_regularization_weight=0, l2_regularization_weight=0, gaussian_noise_injection_std_dev=0, gradient_clipping_threshold_per_sample=np.inf, gradient_clipping_with_truncation=True, epsilon=1e-8, adamax=False)
Creates an Adam learner instance to learn the parameters. See [1] for more
information.
Args:
parameters (list of parameters): list of network parameters to tune.
These can be obtained by the root operator's ``parameters``.
lr (float, list, output of :func:`learning_parameter_schedule`): a learning rate in float, or a learning rate schedule.
See also: :func:`learning_parameter_schedule`
momentum (float, list, output of :func:`momentum_schedule`): momentum schedule. Note that this is the beta1 parameter in the Adam paper [1].
For additional information, please refer to the :cntkwiki:`this CNTK Wiki article <BrainScript-SGD-Block#converting-learning-rate-and-momentum-parameters-from-other-toolkits>`.
unit_gain: when ``True``, momentum is interpreted as a unit-gain filter. Defaults
to the value returned by :func:`default_unit_gain_value`.
variance_momentum (float, list, output of :func:`momentum_schedule` or :func:`momentum_as_time_constant_schedule`): variance momentum schedule.
Note that this is the beta2 parameter in the Adam paper [1]. Defaults to ``momentum_as_time_constant_schedule(720000)``.
l1_regularization_weight (float, optional): the L1 regularization weight per sample,
defaults to 0.0
l2_regularization_weight (float, optional): the L2 regularization weight per sample,
defaults to 0.0
gaussian_noise_injection_std_dev (float, optional): the standard deviation
of the Gaussian noise added to parameters post update, defaults to 0.0
gradient_clipping_threshold_per_sample (float, optional): clipping threshold
per sample, defaults to infinity
gradient_clipping_with_truncation (bool, default ``True``): use gradient clipping
with truncation
use_mean_gradient (bool, default ``False``): use averaged gradient as input to learner.
Defaults to the value returned by :func:`default_use_mean_gradient_value()`.
deprecated:: 2.2
Use the minibatch_size parameter to specify the reference minibatch size.
epsilon (float, optional): numerical stability constant,
defaults to 1e-8
adamax: when ``True``, use infinity-norm variance momentum update instead of L2. Defaults
to False
minibatch_size (int, default ``None``): the minibatch size that the learner's parameters are designed or pre-tuned for. This
size is usually set to the same size as the minibatch data source. CNTK will scale the parameters automatically
to enable an efficient model parameter update while approximating the behavior of the pre-designed and pre-tuned parameters.
If minibatch_size is not specified, CNTK will inherit the minibatch size from the learning rate schedule;
if the learning rate schedule does not specify it either, CNTK will set it to :attr:`IGNORE`. Setting minibatch_size to :attr:`IGNORE`
makes the learner apply the parameters as-is and prevents CNTK from performing any hyper-parameter scaling. See also: :func:`learning_parameter_schedule`
epoch_size (optional, int): number of samples as a scheduling unit for learning rate, momentum and variance_momentum. See also: :func:`learning_parameter_schedule`
Returns:
:class:`~cntk.learners.Learner`: learner instance that can be passed to
the :class:`~cntk.train.trainer.Trainer`
See also:
[1] D. Kingma, J. Ba. `Adam: A Method for Stochastic Optimization
<https://arxiv.org/abs/1412.6980>`_. International Conference for
Learning Representations, 2015.
'''
lr, minibatch_size = _infer_learning_rate_schedule_and_ref_minibatch_size(use_mean_gradient, minibatch_size, lr, epoch_size)
momentum = _infer_learning_parameter_schedule(momentum, minibatch_size, epoch_size)
_verify_momentum_type(momentum)
variance_momentum = _infer_learning_parameter_schedule(variance_momentum, minibatch_size, epoch_size)
_verify_momentum_type(variance_momentum)
gaussian_noise_injection_std_dev = \
training_parameter_schedule(
gaussian_noise_injection_std_dev)
additional_options = cntk_py.AdditionalLearningOptions()
additional_options.l1_regularization_weight = l1_regularization_weight
additional_options.l2_regularization_weight = l2_regularization_weight
additional_options.gaussian_noise_injection_std_dev = gaussian_noise_injection_std_dev
additional_options.gradient_clipping_threshold_per_sample = gradient_clipping_threshold_per_sample
additional_options.gradient_clipping_with_truncation = gradient_clipping_with_truncation
if minibatch_size is not None:
additional_options.dict_options[cntk_py.Learner._MINIBATCH_SIZE] = cntk_py.SizeTWrapper(minibatch_size) #need this to make proper typed DictionaryValue
opt = cntk_py.adam_learner(parameters, lr, momentum, unit_gain,
variance_momentum, epsilon, adamax, additional_options)
opt.is_minibatch_size_explicitly_specified = minibatch_size is not None
return opt
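# A minimal usage sketch (not part of the library): it assumes the public
# ``cntk`` package is importable as ``C`` and builds a tiny hypothetical model
# purely to show how an ``adam`` learner is wired into a ``Trainer``.
def _example_adam_usage():  # pragma: no cover - illustrative only
    import cntk as C
    x = C.input_variable(10)
    y = C.input_variable(2)
    z = C.layers.Dense(2)(x)
    loss = C.cross_entropy_with_softmax(z, y)
    # Plain floats are converted to schedules internally; pass the output of
    # learning_parameter_schedule()/momentum_schedule() for per-epoch control.
    learner = C.adam(z.parameters, lr=0.001, momentum=0.9)
    return C.Trainer(z, loss, learner)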
@typemap
def rmsprop(parameters, lr,
gamma, inc, dec, max, min,
need_ave_multiplier=True,
l1_regularization_weight=0.0, l2_regularization_weight=0.0,
gaussian_noise_injection_std_dev=0.0, gradient_clipping_threshold_per_sample=np.inf,
gradient_clipping_with_truncation=True, use_mean_gradient=None,
minibatch_size=None, epoch_size=None):
'''rmsprop(parameters, lr, gamma, inc, dec, max, min, need_ave_multiplier=True, l1_regularization_weight=0, l2_regularization_weight=0, gaussian_noise_injection_std_dev=0, gradient_clipping_threshold_per_sample=np.inf, gradient_clipping_with_truncation=True)
Creates an RMSProp learner instance to learn the parameters.
Args:
parameters (list of parameters): list of network parameters to tune.
These can be obtained by the root operator's ``parameters``.
lr (float, list, output of :func:`learning_parameter_schedule`): a learning rate in float, or a learning rate schedule.
See also: :func:`learning_parameter_schedule`
gamma (float): Trade-off factor for current and previous gradients. Common value is 0.95. Should be in range (0.0, 1.0)
inc (float): Increasing factor when trying to adjust current learning_rate. Should be greater than 1
dec (float): Decreasing factor when trying to adjust current learning_rate. Should be in range (0.0, 1.0)
max (float): Maximum scale allowed for the initial learning_rate. Should be greater than zero and greater than min
min (float): Minimum scale allowed for the initial learning_rate. Should be greater than zero
need_ave_multiplier (bool, default ``True``):
l1_regularization_weight (float, optional): the L1 regularization weight per sample,
defaults to 0.0
l2_regularization_weight (float, optional): the L2 regularization weight per sample,
defaults to 0.0
gaussian_noise_injection_std_dev (float, optional): the standard deviation
of the Gaussian noise added to parameters post update, defaults to 0.0
gradient_clipping_threshold_per_sample (float, optional): clipping threshold
per sample, defaults to infinity
gradient_clipping_with_truncation (bool, default ``True``): use gradient clipping
with truncation
use_mean_gradient (bool, default ``None``): use averaged gradient as input to learner.
Defaults to the value returned by :func:`default_use_mean_gradient_value()`.
deprecated:: 2.2
Use the minibatch_size parameter to specify the reference minibatch size.
minibatch_size (int, default ``None``): the minibatch size that the learner's parameters are designed or pre-tuned for. This
size is usually set to the same as the minibatch size of the data source. CNTK performs automatic scaling of the parameters
so that the efficient model parameter update still approximates the behavior of the pre-designed and pre-tuned parameters.
If minibatch_size is not specified, CNTK inherits the minibatch size from the learning rate schedule;
if the learning rate schedule does not specify a minibatch_size either, CNTK sets it to :attr:`IGNORE`. Setting minibatch_size to :attr:`IGNORE`
makes the learner apply the parameters as-is, preventing CNTK from performing any hyper-parameter scaling. See also: :func:`learning_parameter_schedule`
epoch_size (optional, int): number of samples as a scheduling unit for learning rate. See also: :func:`learning_parameter_schedule`
Returns:
:class:`~cntk.learners.Learner`: learner instance that can be passed to
the :class:`~cntk.train.trainer.Trainer`
'''
lr, minibatch_size = _infer_learning_rate_schedule_and_ref_minibatch_size(use_mean_gradient, minibatch_size, lr, epoch_size)
gaussian_noise_injection_std_dev = \
training_parameter_schedule(
gaussian_noise_injection_std_dev)
additional_options = cntk_py.AdditionalLearningOptions()
additional_options.l1_regularization_weight = l1_regularization_weight
additional_options.l2_regularization_weight = l2_regularization_weight
additional_options.gaussian_noise_injection_std_dev = gaussian_noise_injection_std_dev
additional_options.gradient_clipping_threshold_per_sample = gradient_clipping_threshold_per_sample
additional_options.gradient_clipping_with_truncation = gradient_clipping_with_truncation
minibatch_size = _infer_ref_minibatch_size_from_legacy_use_mean_gradient(minibatch_size, use_mean_gradient)
if minibatch_size is not None:
additional_options.dict_options[cntk_py.Learner._MINIBATCH_SIZE] = cntk_py.SizeTWrapper(minibatch_size) #need this to make proper typed DictionaryValue
opt = cntk_py.rmsprop_learner(parameters, lr, gamma, inc, dec, max, min,
need_ave_multiplier, additional_options)
opt.is_minibatch_size_explicitly_specified = minibatch_size is not None
return opt
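# A minimal usage sketch (not part of the library): it assumes ``cntk`` is
# importable as ``C``; the gamma/inc/dec/max/min values below are illustrative
# only, not recommended settings.
def _example_rmsprop_usage():  # pragma: no cover - illustrative only
    import cntk as C
    x = C.input_variable(10)
    y = C.input_variable(2)
    z = C.layers.Dense(2)(x)
    loss = C.cross_entropy_with_softmax(z, y)
    learner = C.rmsprop(z.parameters, lr=0.001, gamma=0.95,
                        inc=1.2, dec=0.7, max=10.0, min=1e-8)
    return C.Trainer(z, loss, learner)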
@typemap
def universal(update_func, parameters):
'''
Creates a learner which uses a CNTK function to update the parameters.
Args:
update_func: function that takes parameters and gradients as arguments and
returns a :class:`~cntk.ops.functions.Function` that performs the
desired updates. The returned function updates the parameters through
the :func:`~cntk.ops.assign` operations it contains.
If ``update_func`` contains no :func:`~cntk.ops.assign` operations,
the parameters will not be updated.
parameters (list): list of network parameters to tune.
These can be obtained by the root operator's `parameters`.
Returns:
:class:`~cntk.learners.Learner`: learner instance that can be passed to
the :class:`~cntk.train.trainer.Trainer`
Examples:
>>> def my_adagrad(parameters, gradients):
... accumulators = [C.constant(0, shape=p.shape, dtype=p.dtype, name='accum') for p in parameters]
... update_funcs = []
... for p, g, a in zip(parameters, gradients, accumulators):
... accum_new = C.assign(a, g * g)
... update_funcs.append(C.assign(p, p - 0.01 * g / C.sqrt(accum_new + 1e-6)))
... return C.combine(update_funcs)
...
>>> x = C.input_variable((10,))
>>> y = C.input_variable((2,))
>>> z = C.layers.Sequential([C.layers.Dense(100, activation=C.relu), C.layers.Dense(2)])(x)
>>> loss = C.cross_entropy_with_softmax(z, y)
>>> learner = C.universal(my_adagrad, z.parameters)
>>> trainer = C.Trainer(z, loss, learner)
>>> # now trainer can be used as any other Trainer
'''
from .. import constant
args, _ = utils.get_python_function_arguments(update_func)
if len(args) != 2:
raise ValueError('update_func must be a function that accepts two arguments (parameters, gradients)')
gradients = []
for p in parameters:
if any(dim<0 for dim in p.shape):
raise ValueError('parameter %s has inferred dimensions. Please create the learner after all parameter shapes have been determined'%str(p))
gradients.append(constant(0, shape=p.shape, dtype=p.dtype, name='grad'))
#TODO: add additional options and learning context to the parameters of the update_func so that the update function
# can make use of the context and additional options
result = update_func(parameters, gradients)
return cntk_py.universal_learner(parameters, gradients, result)
| 56.967364
| 343
| 0.728906
| 9,004
| 68,076
| 5.281986
| 0.071302
| 0.064509
| 0.020185
| 0.029437
| 0.800269
| 0.769676
| 0.745789
| 0.729263
| 0.71364
| 0.702685
| 0
| 0.012619
| 0.203772
| 68,076
| 1,194
| 344
| 57.015075
| 0.864789
| 0.593807
| 0
| 0.598465
| 0
| 0.005115
| 0.054476
| 0.005048
| 0
| 0
| 0
| 0.001675
| 0
| 1
| 0.079284
| false
| 0
| 0.02046
| 0
| 0.204604
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
16a11a3584c157d4027243355c7e3a6df5e85f2c
| 142
|
py
|
Python
|
SRC/Chapter_01-The-Basics/08_range_example.py
|
archeranimesh/HeadFirstPython
|
8e0c84871328a6bf3a8d723341be56298440f29b
|
[
"MIT"
] | 1
|
2020-12-26T19:37:14.000Z
|
2020-12-26T19:37:14.000Z
|
SRC/Chapter_01-The-Basics/08_range_example.py
|
archeranimesh/HeadFirstPython
|
8e0c84871328a6bf3a8d723341be56298440f29b
|
[
"MIT"
] | null | null | null |
SRC/Chapter_01-The-Basics/08_range_example.py
|
archeranimesh/HeadFirstPython
|
8e0c84871328a6bf3a8d723341be56298440f29b
|
[
"MIT"
] | null | null | null |
print(list(range(5)))          # [0, 1, 2, 3, 4]
print(list(range(5, 10)))      # [5, 6, 7, 8, 9]
print(list(range(0, 10, 2)))   # [0, 2, 4, 6, 8]
print(list(range(10, 0, -2)))  # [10, 8, 6, 4, 2]
print(list(range(0, 10, -2)))  # [] - a negative step never reaches 10 from 0
| 12.909091
| 29
| 0.605634
| 27
| 142
| 3.185185
| 0.259259
| 0.523256
| 0.813953
| 0.348837
| 0.418605
| 0.418605
| 0
| 0
| 0
| 0
| 0
| 0.128
| 0.119718
| 142
| 10
| 30
| 14.2
| 0.56
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
16b85881135eefe45aa0a9b794f4863f2eda78f1
| 98
|
py
|
Python
|
terrascript/triton/__init__.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/triton/__init__.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/triton/__init__.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
# terrascript/triton/__init__.py
import terrascript
class triton(terrascript.Provider):
pass
| 16.333333
| 35
| 0.795918
| 11
| 98
| 6.727273
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122449
| 98
| 6
| 36
| 16.333333
| 0.860465
| 0.306122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
bc6c3d2943dffd2db638fdc56eedb5f617d7fe41
| 47
|
py
|
Python
|
aiohttphelper/__init__.py
|
gregorybarille/AioCalls
|
15a8ffe56e656b641abcbc5a899b662e7fc2f0d8
|
[
"MIT"
] | 1
|
2020-07-10T13:06:27.000Z
|
2020-07-10T13:06:27.000Z
|
aiohttphelper/__init__.py
|
gregorybarille/AioCalls
|
15a8ffe56e656b641abcbc5a899b662e7fc2f0d8
|
[
"MIT"
] | null | null | null |
aiohttphelper/__init__.py
|
gregorybarille/AioCalls
|
15a8ffe56e656b641abcbc5a899b662e7fc2f0d8
|
[
"MIT"
] | null | null | null |
from .functions import get, put, post, delete
| 15.666667
| 45
| 0.744681
| 7
| 47
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 47
| 2
| 46
| 23.5
| 0.897436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bc7d118927d5343e885592d3c62eefb0a18867ce
| 51
|
py
|
Python
|
__init__.py
|
goodship1/CoinCommand
|
9b8f36d4b5241f9cb56cf68bf449c1af03c44a7a
|
[
"MIT"
] | 2
|
2018-01-25T22:10:37.000Z
|
2020-02-13T16:49:55.000Z
|
__init__.py
|
goodship1/CoinCommand
|
9b8f36d4b5241f9cb56cf68bf449c1af03c44a7a
|
[
"MIT"
] | 5
|
2018-03-03T23:35:21.000Z
|
2019-09-22T18:30:49.000Z
|
__init__.py
|
goodship1/CoinCommand
|
9b8f36d4b5241f9cb56cf68bf449c1af03c44a7a
|
[
"MIT"
] | null | null | null |
from .CoincommandExceptions import CoinDoesntExist
| 25.5
| 50
| 0.901961
| 4
| 51
| 11.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 51
| 1
| 51
| 51
| 0.978723
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bc825e0310b5e4859b0059b3ada81590c68d8418
| 569
|
py
|
Python
|
pyspedas/erg/__init__.py
|
pulupa/pyspedas
|
7228199cf16eca2a27d130f1e4985ef1e69462ea
|
[
"MIT"
] | 3
|
2018-10-22T20:15:39.000Z
|
2019-03-06T18:03:35.000Z
|
pyspedas/erg/__init__.py
|
pulupa/pyspedas
|
7228199cf16eca2a27d130f1e4985ef1e69462ea
|
[
"MIT"
] | null | null | null |
pyspedas/erg/__init__.py
|
pulupa/pyspedas
|
7228199cf16eca2a27d130f1e4985ef1e69462ea
|
[
"MIT"
] | 2
|
2019-01-25T20:03:33.000Z
|
2019-07-05T19:53:30.000Z
|
from .satellite.erg.hep.hep import hep
from .satellite.erg.lepe.lepe import lepe
from .satellite.erg.lepi.lepi import lepi
from .satellite.erg.mepe.mepe import mepe
from .satellite.erg.mepi.mepi_nml import mepi_nml
from .satellite.erg.mepi.mepi_tof import mepi_tof
from .satellite.erg.mgf.mgf import mgf
from .satellite.erg.orb.orb import orb
from .satellite.erg.pwe.pwe_efd import pwe_efd
from .satellite.erg.pwe.pwe_hfa import pwe_hfa
from .satellite.erg.pwe.pwe_ofa import pwe_ofa
from .satellite.erg.pwe.pwe_wfc import pwe_wfc
from .satellite.erg.xep.xep import xep
| 43.769231
| 49
| 0.818981
| 103
| 569
| 4.407767
| 0.184466
| 0.372247
| 0.45815
| 0.167401
| 0.299559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089631
| 569
| 13
| 50
| 43.769231
| 0.876448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bcaccc0f170c85448aa391ca61f85defd86f0387
| 250
|
py
|
Python
|
examplepackage/tests/test_examplemodule.py
|
dionysisbacchus/python-project-template
|
1c25dc7022614b59c61bb99321c580afdeac17f2
|
[
"MIT"
] | null | null | null |
examplepackage/tests/test_examplemodule.py
|
dionysisbacchus/python-project-template
|
1c25dc7022614b59c61bb99321c580afdeac17f2
|
[
"MIT"
] | null | null | null |
examplepackage/tests/test_examplemodule.py
|
dionysisbacchus/python-project-template
|
1c25dc7022614b59c61bb99321c580afdeac17f2
|
[
"MIT"
] | 3
|
2022-02-22T06:54:19.000Z
|
2022-02-23T20:06:13.000Z
|
import pytest
from examplepackage.examplemodule import example_function
@pytest.mark.parametrize("test_input,expected", [(2, 4), (3, 9), (5, 25)])
def test_example_function(test_input, expected):
assert example_function(test_input) == expected
| 31.25
| 74
| 0.772
| 33
| 250
| 5.636364
| 0.606061
| 0.241935
| 0.274194
| 0.258065
| 0.344086
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0.104
| 250
| 7
| 75
| 35.714286
| 0.799107
| 0
| 0
| 0
| 0
| 0
| 0.076
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bcf9571cb6b1398073675c04d6c2d86ad989f69e
| 7,371
|
py
|
Python
|
cottonformation/res/inspector.py
|
gitter-badger/cottonformation-project
|
354f1dce7ea106e209af2d5d818b6033a27c193c
|
[
"BSD-2-Clause"
] | 5
|
2021-07-22T03:45:59.000Z
|
2021-12-17T21:07:14.000Z
|
cottonformation/res/inspector.py
|
gitter-badger/cottonformation-project
|
354f1dce7ea106e209af2d5d818b6033a27c193c
|
[
"BSD-2-Clause"
] | 1
|
2021-06-25T18:01:31.000Z
|
2021-06-25T18:01:31.000Z
|
cottonformation/res/inspector.py
|
gitter-badger/cottonformation-project
|
354f1dce7ea106e209af2d5d818b6033a27c193c
|
[
"BSD-2-Clause"
] | 2
|
2021-06-27T03:08:21.000Z
|
2021-06-28T22:15:51.000Z
|
# -*- coding: utf-8 -*-
"""
This module declares the AWS::Inspector CloudFormation resources (ResourceGroup, AssessmentTemplate, AssessmentTarget).
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
#--- Resource declaration ---
@attr.s
class ResourceGroup(Resource):
"""
AWS Object Type = "AWS::Inspector::ResourceGroup"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-resourcegroup.html
Property Document:
- ``rp_ResourceGroupTags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-resourcegroup.html#cfn-inspector-resourcegroup-resourcegrouptags
"""
AWS_OBJECT_TYPE = "AWS::Inspector::ResourceGroup"
rp_ResourceGroupTags: typing.List[typing.Union[Tag, dict]] = attr.ib(
default=None,
converter=Tag.from_list,
validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list)),
metadata={AttrMeta.PROPERTY_NAME: "ResourceGroupTags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-resourcegroup.html#cfn-inspector-resourcegroup-resourcegrouptags"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-resourcegroup.html#aws-resource-inspector-resourcegroup-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@attr.s
class AssessmentTemplate(Resource):
"""
AWS Object Type = "AWS::Inspector::AssessmentTemplate"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttemplate.html
Property Document:
- ``rp_AssessmentTargetArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttemplate.html#cfn-inspector-assessmenttemplate-assessmenttargetarn
- ``rp_DurationInSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttemplate.html#cfn-inspector-assessmenttemplate-durationinseconds
- ``rp_RulesPackageArns``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttemplate.html#cfn-inspector-assessmenttemplate-rulespackagearns
- ``p_AssessmentTemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttemplate.html#cfn-inspector-assessmenttemplate-assessmenttemplatename
- ``p_UserAttributesForFindings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttemplate.html#cfn-inspector-assessmenttemplate-userattributesforfindings
"""
AWS_OBJECT_TYPE = "AWS::Inspector::AssessmentTemplate"
rp_AssessmentTargetArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "AssessmentTargetArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttemplate.html#cfn-inspector-assessmenttemplate-assessmenttargetarn"""
rp_DurationInSeconds: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "DurationInSeconds"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttemplate.html#cfn-inspector-assessmenttemplate-durationinseconds"""
rp_RulesPackageArns: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list)),
metadata={AttrMeta.PROPERTY_NAME: "RulesPackageArns"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttemplate.html#cfn-inspector-assessmenttemplate-rulespackagearns"""
p_AssessmentTemplateName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "AssessmentTemplateName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttemplate.html#cfn-inspector-assessmenttemplate-assessmenttemplatename"""
p_UserAttributesForFindings: typing.List[typing.Union[Tag, dict]] = attr.ib(
default=None,
converter=Tag.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "UserAttributesForFindings"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttemplate.html#cfn-inspector-assessmenttemplate-userattributesforfindings"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttemplate.html#aws-resource-inspector-assessmenttemplate-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@attr.s
class AssessmentTarget(Resource):
"""
AWS Object Type = "AWS::Inspector::AssessmentTarget"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttarget.html
Property Document:
- ``p_AssessmentTargetName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttarget.html#cfn-inspector-assessmenttarget-assessmenttargetname
- ``p_ResourceGroupArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttarget.html#cfn-inspector-assessmenttarget-resourcegrouparn
"""
AWS_OBJECT_TYPE = "AWS::Inspector::AssessmentTarget"
p_AssessmentTargetName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "AssessmentTargetName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttarget.html#cfn-inspector-assessmenttarget-assessmenttargetname"""
p_ResourceGroupArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ResourceGroupArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttarget.html#cfn-inspector-assessmenttarget-resourcegrouparn"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-inspector-assessmenttarget.html#aws-resource-inspector-assessmenttarget-return-values"""
return GetAtt(resource=self, attr_name="Arn")
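# A minimal usage sketch (not part of the generated module): it wires the
# resources declared above together. It assumes the ``Resource`` base class
# takes the CloudFormation logical id as its first positional argument and
# that a GetAtt reference satisfies the intrinsic-string check; the rules
# package ARN is a placeholder value.
def _example_inspector_usage():  # pragma: no cover - illustrative only
    target = AssessmentTarget(
        "MyAssessmentTarget",
        p_AssessmentTargetName="my-target",
    )
    template = AssessmentTemplate(
        "MyAssessmentTemplate",
        rp_AssessmentTargetArn=target.rv_Arn,  # reference the target's ARN via GetAtt
        rp_DurationInSeconds=3600,
        rp_RulesPackageArns=["arn:aws:inspector:placeholder"],
    )
    return target, template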
| 52.65
| 206
| 0.765025
| 765
| 7,371
| 7.275817
| 0.099346
| 0.049407
| 0.089831
| 0.067194
| 0.874057
| 0.874057
| 0.82447
| 0.82447
| 0.82447
| 0.802551
| 0
| 0.000152
| 0.109076
| 7,371
| 139
| 207
| 53.028777
| 0.847419
| 0.366436
| 0
| 0.378788
| 0
| 0
| 0.080655
| 0.044739
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.060606
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4c2861eb6324559081276d32528bdabcbc7ccbf6
| 38
|
py
|
Python
|
tests/inline_test_resources/5/importB.py
|
thesynman/pybricksdev
|
6f34cfb7a5f26628fe3cedae1ce51ee6024f57b9
|
[
"MIT"
] | null | null | null |
tests/inline_test_resources/5/importB.py
|
thesynman/pybricksdev
|
6f34cfb7a5f26628fe3cedae1ce51ee6024f57b9
|
[
"MIT"
] | null | null | null |
tests/inline_test_resources/5/importB.py
|
thesynman/pybricksdev
|
6f34cfb7a5f26628fe3cedae1ce51ee6024f57b9
|
[
"MIT"
] | null | null | null |
noway = 'should not import this file'
| 19
| 37
| 0.736842
| 6
| 38
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 38
| 1
| 38
| 38
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0.710526
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4c3abb2a1ed1cbd85234cb3c5af8aefe1a81b75d
| 32,443
|
py
|
Python
|
dlp/tests/unit/gapic/v2/test_dlp_service_client_v2.py
|
udengcnf/gcloud
|
dd1714bd754e18739339e611c42a391ced27c614
|
[
"Apache-2.0"
] | null | null | null |
dlp/tests/unit/gapic/v2/test_dlp_service_client_v2.py
|
udengcnf/gcloud
|
dd1714bd754e18739339e611c42a391ced27c614
|
[
"Apache-2.0"
] | null | null | null |
dlp/tests/unit/gapic/v2/test_dlp_service_client_v2.py
|
udengcnf/gcloud
|
dd1714bd754e18739339e611c42a391ced27c614
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import pytest
from google.cloud import dlp_v2
from google.cloud.dlp_v2.proto import dlp_pb2
from google.protobuf import empty_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestDlpServiceClient(object):
def test_inspect_content(self):
# Setup Expected Response
expected_response = {}
expected_response = dlp_pb2.InspectContentResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
parent = client.project_path('[PROJECT]')
response = client.inspect_content(parent)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = dlp_pb2.InspectContentRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_inspect_content_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
parent = client.project_path('[PROJECT]')
with pytest.raises(CustomException):
client.inspect_content(parent)
def test_redact_image(self):
# Setup Expected Response
redacted_image = b'28'
extracted_text = 'extractedText998260012'
expected_response = {
'redacted_image': redacted_image,
'extracted_text': extracted_text
}
expected_response = dlp_pb2.RedactImageResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
parent = client.project_path('[PROJECT]')
response = client.redact_image(parent)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = dlp_pb2.RedactImageRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_redact_image_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
parent = client.project_path('[PROJECT]')
with pytest.raises(CustomException):
client.redact_image(parent)
def test_deidentify_content(self):
# Setup Expected Response
expected_response = {}
expected_response = dlp_pb2.DeidentifyContentResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
parent = client.project_path('[PROJECT]')
response = client.deidentify_content(parent)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = dlp_pb2.DeidentifyContentRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_deidentify_content_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
parent = client.project_path('[PROJECT]')
with pytest.raises(CustomException):
client.deidentify_content(parent)
def test_reidentify_content(self):
# Setup Expected Response
expected_response = {}
expected_response = dlp_pb2.ReidentifyContentResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
parent = client.project_path('[PROJECT]')
response = client.reidentify_content(parent)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = dlp_pb2.ReidentifyContentRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_reidentify_content_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
parent = client.project_path('[PROJECT]')
with pytest.raises(CustomException):
client.reidentify_content(parent)
def test_list_info_types(self):
# Setup Expected Response
expected_response = {}
expected_response = dlp_pb2.ListInfoTypesResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
response = client.list_info_types()
assert expected_response == response
assert len(channel.requests) == 1
expected_request = dlp_pb2.ListInfoTypesRequest()
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_info_types_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
with pytest.raises(CustomException):
client.list_info_types()
def test_create_inspect_template(self):
# Setup Expected Response
name = 'name3373707'
display_name = 'displayName1615086568'
description = 'description-1724546052'
expected_response = {
'name': name,
'display_name': display_name,
'description': description
}
expected_response = dlp_pb2.InspectTemplate(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
parent = client.organization_path('[ORGANIZATION]')
response = client.create_inspect_template(parent)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = dlp_pb2.CreateInspectTemplateRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_inspect_template_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
parent = client.organization_path('[ORGANIZATION]')
with pytest.raises(CustomException):
client.create_inspect_template(parent)
def test_update_inspect_template(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
display_name = 'displayName1615086568'
description = 'description-1724546052'
expected_response = {
'name': name_2,
'display_name': display_name,
'description': description
}
expected_response = dlp_pb2.InspectTemplate(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
name = client.organization_inspect_template_path(
'[ORGANIZATION]', '[INSPECT_TEMPLATE]')
response = client.update_inspect_template(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = dlp_pb2.UpdateInspectTemplateRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_inspect_template_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
name = client.organization_inspect_template_path(
'[ORGANIZATION]', '[INSPECT_TEMPLATE]')
with pytest.raises(CustomException):
client.update_inspect_template(name)
def test_get_inspect_template(self):
# Setup Expected Response
name = 'name3373707'
display_name = 'displayName1615086568'
description = 'description-1724546052'
expected_response = {
'name': name,
'display_name': display_name,
'description': description
}
expected_response = dlp_pb2.InspectTemplate(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
response = client.get_inspect_template()
assert expected_response == response
assert len(channel.requests) == 1
expected_request = dlp_pb2.GetInspectTemplateRequest()
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_inspect_template_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
with pytest.raises(CustomException):
client.get_inspect_template()
def test_list_inspect_templates(self):
# Setup Expected Response
next_page_token = ''
inspect_templates_element = {}
inspect_templates = [inspect_templates_element]
expected_response = {
'next_page_token': next_page_token,
'inspect_templates': inspect_templates
}
expected_response = dlp_pb2.ListInspectTemplatesResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
parent = client.organization_path('[ORGANIZATION]')
paged_list_response = client.list_inspect_templates(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.inspect_templates[0] == resources[0]
assert len(channel.requests) == 1
expected_request = dlp_pb2.ListInspectTemplatesRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_inspect_templates_exception(self):
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
parent = client.organization_path('[ORGANIZATION]')
paged_list_response = client.list_inspect_templates(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_delete_inspect_template(self):
channel = ChannelStub()
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
name = client.organization_inspect_template_path(
'[ORGANIZATION]', '[INSPECT_TEMPLATE]')
client.delete_inspect_template(name)
assert len(channel.requests) == 1
expected_request = dlp_pb2.DeleteInspectTemplateRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_inspect_template_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
name = client.organization_inspect_template_path(
'[ORGANIZATION]', '[INSPECT_TEMPLATE]')
with pytest.raises(CustomException):
client.delete_inspect_template(name)
def test_create_deidentify_template(self):
# Setup Expected Response
name = 'name3373707'
display_name = 'displayName1615086568'
description = 'description-1724546052'
expected_response = {
'name': name,
'display_name': display_name,
'description': description
}
expected_response = dlp_pb2.DeidentifyTemplate(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
parent = client.organization_path('[ORGANIZATION]')
response = client.create_deidentify_template(parent)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = dlp_pb2.CreateDeidentifyTemplateRequest(
parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_deidentify_template_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
parent = client.organization_path('[ORGANIZATION]')
with pytest.raises(CustomException):
client.create_deidentify_template(parent)
def test_update_deidentify_template(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
display_name = 'displayName1615086568'
description = 'description-1724546052'
expected_response = {
'name': name_2,
'display_name': display_name,
'description': description
}
expected_response = dlp_pb2.DeidentifyTemplate(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
name = client.organization_deidentify_template_path(
'[ORGANIZATION]', '[DEIDENTIFY_TEMPLATE]')
response = client.update_deidentify_template(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = dlp_pb2.UpdateDeidentifyTemplateRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_deidentify_template_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
name = client.organization_deidentify_template_path(
'[ORGANIZATION]', '[DEIDENTIFY_TEMPLATE]')
with pytest.raises(CustomException):
client.update_deidentify_template(name)
def test_get_deidentify_template(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
display_name = 'displayName1615086568'
description = 'description-1724546052'
expected_response = {
'name': name_2,
'display_name': display_name,
'description': description
}
expected_response = dlp_pb2.DeidentifyTemplate(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
name = client.organization_deidentify_template_path(
'[ORGANIZATION]', '[DEIDENTIFY_TEMPLATE]')
response = client.get_deidentify_template(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = dlp_pb2.GetDeidentifyTemplateRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_deidentify_template_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
name = client.organization_deidentify_template_path(
'[ORGANIZATION]', '[DEIDENTIFY_TEMPLATE]')
with pytest.raises(CustomException):
client.get_deidentify_template(name)
def test_list_deidentify_templates(self):
# Setup Expected Response
next_page_token = ''
deidentify_templates_element = {}
deidentify_templates = [deidentify_templates_element]
expected_response = {
'next_page_token': next_page_token,
'deidentify_templates': deidentify_templates
}
expected_response = dlp_pb2.ListDeidentifyTemplatesResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
parent = client.organization_path('[ORGANIZATION]')
paged_list_response = client.list_deidentify_templates(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.deidentify_templates[0] == resources[0]
assert len(channel.requests) == 1
expected_request = dlp_pb2.ListDeidentifyTemplatesRequest(
parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_deidentify_templates_exception(self):
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
parent = client.organization_path('[ORGANIZATION]')
paged_list_response = client.list_deidentify_templates(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_delete_deidentify_template(self):
channel = ChannelStub()
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
name = client.organization_deidentify_template_path(
'[ORGANIZATION]', '[DEIDENTIFY_TEMPLATE]')
client.delete_deidentify_template(name)
assert len(channel.requests) == 1
expected_request = dlp_pb2.DeleteDeidentifyTemplateRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_deidentify_template_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
name = client.organization_deidentify_template_path(
'[ORGANIZATION]', '[DEIDENTIFY_TEMPLATE]')
with pytest.raises(CustomException):
client.delete_deidentify_template(name)
def test_create_dlp_job(self):
# Setup Expected Response
name = 'name3373707'
job_trigger_name = 'jobTriggerName1819490804'
expected_response = {
'name': name,
'job_trigger_name': job_trigger_name
}
expected_response = dlp_pb2.DlpJob(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
parent = client.project_path('[PROJECT]')
response = client.create_dlp_job(parent)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = dlp_pb2.CreateDlpJobRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_dlp_job_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
parent = client.project_path('[PROJECT]')
with pytest.raises(CustomException):
client.create_dlp_job(parent)
def test_list_dlp_jobs(self):
# Setup Expected Response
next_page_token = ''
jobs_element = {}
jobs = [jobs_element]
expected_response = {'next_page_token': next_page_token, 'jobs': jobs}
expected_response = dlp_pb2.ListDlpJobsResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
parent = client.project_path('[PROJECT]')
paged_list_response = client.list_dlp_jobs(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.jobs[0] == resources[0]
assert len(channel.requests) == 1
expected_request = dlp_pb2.ListDlpJobsRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_dlp_jobs_exception(self):
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
parent = client.project_path('[PROJECT]')
paged_list_response = client.list_dlp_jobs(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_dlp_job(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
job_trigger_name = 'jobTriggerName1819490804'
expected_response = {
'name': name_2,
'job_trigger_name': job_trigger_name
}
expected_response = dlp_pb2.DlpJob(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
name = client.dlp_job_path('[PROJECT]', '[DLP_JOB]')
response = client.get_dlp_job(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = dlp_pb2.GetDlpJobRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_dlp_job_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
name = client.dlp_job_path('[PROJECT]', '[DLP_JOB]')
with pytest.raises(CustomException):
client.get_dlp_job(name)
def test_delete_dlp_job(self):
channel = ChannelStub()
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
name = client.dlp_job_path('[PROJECT]', '[DLP_JOB]')
client.delete_dlp_job(name)
assert len(channel.requests) == 1
expected_request = dlp_pb2.DeleteDlpJobRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_dlp_job_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
name = client.dlp_job_path('[PROJECT]', '[DLP_JOB]')
with pytest.raises(CustomException):
client.delete_dlp_job(name)
def test_cancel_dlp_job(self):
channel = ChannelStub()
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
name = client.dlp_job_path('[PROJECT]', '[DLP_JOB]')
client.cancel_dlp_job(name)
assert len(channel.requests) == 1
expected_request = dlp_pb2.CancelDlpJobRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_cancel_dlp_job_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
name = client.dlp_job_path('[PROJECT]', '[DLP_JOB]')
with pytest.raises(CustomException):
client.cancel_dlp_job(name)
def test_list_job_triggers(self):
# Setup Expected Response
next_page_token = ''
job_triggers_element = {}
job_triggers = [job_triggers_element]
expected_response = {
'next_page_token': next_page_token,
'job_triggers': job_triggers
}
expected_response = dlp_pb2.ListJobTriggersResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
parent = client.project_path('[PROJECT]')
paged_list_response = client.list_job_triggers(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.job_triggers[0] == resources[0]
assert len(channel.requests) == 1
expected_request = dlp_pb2.ListJobTriggersRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_job_triggers_exception(self):
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
parent = client.project_path('[PROJECT]')
paged_list_response = client.list_job_triggers(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_job_trigger(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
display_name = 'displayName1615086568'
description = 'description-1724546052'
expected_response = {
'name': name_2,
'display_name': display_name,
'description': description
}
expected_response = dlp_pb2.JobTrigger(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
name = client.project_job_trigger_path('[PROJECT]', '[JOB_TRIGGER]')
response = client.get_job_trigger(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = dlp_pb2.GetJobTriggerRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_job_trigger_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
name = client.project_job_trigger_path('[PROJECT]', '[JOB_TRIGGER]')
with pytest.raises(CustomException):
client.get_job_trigger(name)
def test_delete_job_trigger(self):
channel = ChannelStub()
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
name = 'name3373707'
client.delete_job_trigger(name)
assert len(channel.requests) == 1
expected_request = dlp_pb2.DeleteJobTriggerRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_job_trigger_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
name = 'name3373707'
with pytest.raises(CustomException):
client.delete_job_trigger(name)
def test_update_job_trigger(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
display_name = 'displayName1615086568'
description = 'description-1724546052'
expected_response = {
'name': name_2,
'display_name': display_name,
'description': description
}
expected_response = dlp_pb2.JobTrigger(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
name = client.project_job_trigger_path('[PROJECT]', '[JOB_TRIGGER]')
response = client.update_job_trigger(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = dlp_pb2.UpdateJobTriggerRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_job_trigger_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
name = client.project_job_trigger_path('[PROJECT]', '[JOB_TRIGGER]')
with pytest.raises(CustomException):
client.update_job_trigger(name)
def test_create_job_trigger(self):
# Setup Expected Response
name = 'name3373707'
display_name = 'displayName1615086568'
description = 'description-1724546052'
expected_response = {
'name': name,
'display_name': display_name,
'description': description
}
expected_response = dlp_pb2.JobTrigger(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup Request
parent = client.project_path('[PROJECT]')
response = client.create_job_trigger(parent)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = dlp_pb2.CreateJobTriggerRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_job_trigger_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = dlp_v2.DlpServiceClient(channel=channel)
# Setup request
parent = client.project_path('[PROJECT]')
with pytest.raises(CustomException):
client.create_job_trigger(parent)
| 35.073514
| 79
| 0.668095
| 3,223
| 32,443
| 6.471921
| 0.064536
| 0.092047
| 0.026368
| 0.06472
| 0.871422
| 0.82655
| 0.82281
| 0.814756
| 0.802148
| 0.79745
| 0
| 0.021822
| 0.247141
| 32,443
| 924
| 80
| 35.111472
| 0.83218
| 0.083192
| 0
| 0.693878
| 0
| 0
| 0.06235
| 0.019691
| 0
| 0
| 0
| 0
| 0.12585
| 1
| 0.091837
| false
| 0.001701
| 0.006803
| 0.001701
| 0.108844
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
910dd7c2ccdb8478d45a4d1037b5ebdbb57c27fd
| 49
|
py
|
Python
|
teste3.py
|
asalesg/python
|
885ff5fae1e685424863286082fbe49ca5f4efe7
|
[
"MIT"
] | null | null | null |
teste3.py
|
asalesg/python
|
885ff5fae1e685424863286082fbe49ca5f4efe7
|
[
"MIT"
] | null | null | null |
teste3.py
|
asalesg/python
|
885ff5fae1e685424863286082fbe49ca5f4efe7
|
[
"MIT"
] | null | null | null |
lista = [0, 5, 10, 15, 5, 10, 20]
print(lista.count(5))  # 2 - the value 5 appears twice in the list
| 24.5
| 27
| 0.653061
| 12
| 49
| 2.666667
| 0.666667
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.26087
| 0.061224
| 49
| 2
| 28
| 24.5
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
9129bbed156ecbcc8cc5c6c777c4e04f572f2563
| 125
|
py
|
Python
|
tests/test_match_parse.py
|
rickproza/twill
|
7a98e4912a8ff929a94e35d35e7a027472ee4f46
|
[
"MIT"
] | 13
|
2020-04-18T15:17:58.000Z
|
2022-02-24T13:25:46.000Z
|
tests/test_match_parse.py
|
rickproza/twill
|
7a98e4912a8ff929a94e35d35e7a027472ee4f46
|
[
"MIT"
] | 5
|
2020-04-04T21:16:00.000Z
|
2022-02-10T00:26:20.000Z
|
tests/test_match_parse.py
|
rickproza/twill
|
7a98e4912a8ff929a94e35d35e7a027472ee4f46
|
[
"MIT"
] | 3
|
2020-06-06T17:26:19.000Z
|
2022-02-10T00:30:39.000Z
|
from .utils import execute_script
def test_match_parse(url):
execute_script('test_match_parse.twill', initial_url=url)
| 20.833333
| 61
| 0.8
| 19
| 125
| 4.894737
| 0.631579
| 0.27957
| 0.301075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112
| 125
| 5
| 62
| 25
| 0.837838
| 0
| 0
| 0
| 0
| 0
| 0.176
| 0.176
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
91454052c40a3924d575f59ff7f47c6043d9897f
| 45,806
|
py
|
Python
|
tests/api/v1_3_0/test_devices.py
|
wastorga/dnacentersdk
|
1a25aaef2eaa016fe54ebebbd7448919e0effa3f
|
[
"MIT"
] | null | null | null |
tests/api/v1_3_0/test_devices.py
|
wastorga/dnacentersdk
|
1a25aaef2eaa016fe54ebebbd7448919e0effa3f
|
[
"MIT"
] | null | null | null |
tests/api/v1_3_0/test_devices.py
|
wastorga/dnacentersdk
|
1a25aaef2eaa016fe54ebebbd7448919e0effa3f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""DNACenterAPI devices API fixtures and tests.
Copyright (c) 2019 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from tests.environment import DNA_CENTER_VERSION
from tests.models.schema_validator import json_schema_validate
pytestmark = pytest.mark.skipif(DNA_CENTER_VERSION != '1.3.0', reason='version does not match')
def is_valid_get_module_info_by_id(obj):
json_schema_validate('jsd_0db7da744c0b83d8_v1_3_0').validate(obj)
return True
def get_module_info_by_id(api):
endpoint_result = api.devices.get_module_info_by_id(
id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_module_info_by_id(api):
assert is_valid_get_module_info_by_id(
get_module_info_by_id(api)
)
def get_module_info_by_id_default(api):
endpoint_result = api.devices.get_module_info_by_id(
id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_module_info_by_id_default(api):
try:
assert is_valid_get_module_info_by_id(
get_module_info_by_id_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_delete_device_by_id(obj):
json_schema_validate('jsd_1c894b5848eab214_v1_3_0').validate(obj)
return True
def delete_device_by_id(api):
endpoint_result = api.devices.delete_device_by_id(
id='string',
is_force_delete=True
)
return endpoint_result
@pytest.mark.devices
def test_delete_device_by_id(api):
assert is_valid_delete_device_by_id(
delete_device_by_id(api)
)
def delete_device_by_id_default(api):
endpoint_result = api.devices.delete_device_by_id(
id='string',
is_force_delete=None
)
return endpoint_result
@pytest.mark.devices
def test_delete_device_by_id_default(api):
try:
assert is_valid_delete_device_by_id(
delete_device_by_id_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_sync_devices_using_forcesync(obj):
json_schema_validate('jsd_3b9ef9674429be4c_v1_3_0').validate(obj)
return True
def sync_devices_using_forcesync(api):
endpoint_result = api.devices.sync_devices_using_forcesync(
active_validation=True,
force_sync=True,
payload=[{}]
)
return endpoint_result
@pytest.mark.devices
def test_sync_devices_using_forcesync(api):
assert is_valid_sync_devices_using_forcesync(
sync_devices_using_forcesync(api)
)
def sync_devices_using_forcesync_default(api):
endpoint_result = api.devices.sync_devices_using_forcesync(
active_validation=True,
force_sync=None,
payload=None
)
return endpoint_result
@pytest.mark.devices
def test_sync_devices_using_forcesync_default(api):
try:
assert is_valid_sync_devices_using_forcesync(
sync_devices_using_forcesync_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_device_list(obj):
json_schema_validate('jsd_20b19b52464b8972_v1_3_0').validate(obj)
return True
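# Editor's comment: list-style query filters are exercised with the placeholder
# 'value1,value2', i.e. multiple values passed as one comma-separated string.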
def get_device_list(api):
endpoint_result = api.devices.get_device_list(
associated_wlc_ip='value1,value2',
collection_interval='value1,value2',
collection_status='value1,value2',
error_code='value1,value2',
error_description='value1,value2',
family='value1,value2',
hostname='value1,value2',
id='string',
license_name='value1,value2',
license_status='value1,value2',
license_type='value1,value2',
location='value1,value2',
location_name='value1,value2',
mac_address='value1,value2',
management_ip_address='value1,value2',
module_equpimenttype='value1,value2',
module_name='value1,value2',
module_operationstatecode='value1,value2',
module_partnumber='value1,value2',
module_servicestate='value1,value2',
module_vendorequipmenttype='value1,value2',
not_synced_for_minutes='value1,value2',
platform_id='value1,value2',
reachability_status='value1,value2',
role='value1,value2',
serial_number='value1,value2',
series='value1,value2',
software_type='value1,value2',
software_version='value1,value2',
type='value1,value2',
up_time='value1,value2'
)
return endpoint_result
@pytest.mark.devices
def test_get_device_list(api):
assert is_valid_get_device_list(
get_device_list(api)
)
def get_device_list_default(api):
endpoint_result = api.devices.get_device_list(
associated_wlc_ip=None,
collection_interval=None,
collection_status=None,
error_code=None,
error_description=None,
family=None,
hostname=None,
id=None,
license_name=None,
license_status=None,
license_type=None,
location=None,
location_name=None,
mac_address=None,
management_ip_address=None,
module_equpimenttype=None,
module_name=None,
module_operationstatecode=None,
module_partnumber=None,
module_servicestate=None,
module_vendorequipmenttype=None,
not_synced_for_minutes=None,
platform_id=None,
reachability_status=None,
role=None,
serial_number=None,
series=None,
software_type=None,
software_version=None,
type=None,
up_time=None
)
return endpoint_result
@pytest.mark.devices
def test_get_device_list_default(api):
try:
assert is_valid_get_device_list(
get_device_list_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_device_interface_vlans(obj):
json_schema_validate('jsd_288df9494f2a9746_v1_3_0').validate(obj)
return True
def get_device_interface_vlans(api):
endpoint_result = api.devices.get_device_interface_vlans(
id='string',
interface_type='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_device_interface_vlans(api):
assert is_valid_get_device_interface_vlans(
get_device_interface_vlans(api)
)
def get_device_interface_vlans_default(api):
endpoint_result = api.devices.get_device_interface_vlans(
id='string',
interface_type=None
)
return endpoint_result
@pytest.mark.devices
def test_get_device_interface_vlans_default(api):
try:
assert is_valid_get_device_interface_vlans(
get_device_interface_vlans_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_polling_interval_for_all_devices(obj):
json_schema_validate('jsd_38bd0b884b89a785_v1_3_0').validate(obj)
return True
def get_polling_interval_for_all_devices(api):
endpoint_result = api.devices.get_polling_interval_for_all_devices(
)
return endpoint_result
@pytest.mark.devices
def test_get_polling_interval_for_all_devices(api):
assert is_valid_get_polling_interval_for_all_devices(
get_polling_interval_for_all_devices(api)
)
def get_polling_interval_for_all_devices_default(api):
endpoint_result = api.devices.get_polling_interval_for_all_devices(
)
return endpoint_result
@pytest.mark.devices
def test_get_polling_interval_for_all_devices_default(api):
try:
assert is_valid_get_polling_interval_for_all_devices(
get_polling_interval_for_all_devices_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_device_interfaces_by_specified_range(obj):
json_schema_validate('jsd_349c888443b89a58_v1_3_0').validate(obj)
return True
def get_device_interfaces_by_specified_range(api):
endpoint_result = api.devices.get_device_interfaces_by_specified_range(
device_id='string',
records_to_return=0,
start_index=0
)
return endpoint_result
@pytest.mark.devices
def test_get_device_interfaces_by_specified_range(api):
assert is_valid_get_device_interfaces_by_specified_range(
get_device_interfaces_by_specified_range(api)
)
def get_device_interfaces_by_specified_range_default(api):
endpoint_result = api.devices.get_device_interfaces_by_specified_range(
device_id='string',
records_to_return=0,
start_index=0
)
return endpoint_result
@pytest.mark.devices
def test_get_device_interfaces_by_specified_range_default(api):
try:
assert is_valid_get_device_interfaces_by_specified_range(
get_device_interfaces_by_specified_range_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_device_interface_count(obj):
json_schema_validate('jsd_3d923b184dc9a4ca_v1_3_0').validate(obj)
return True
def get_device_interface_count(api):
endpoint_result = api.devices.get_device_interface_count(
)
return endpoint_result
@pytest.mark.devices
def test_get_device_interface_count(api):
assert is_valid_get_device_interface_count(
get_device_interface_count(api)
)
def get_device_interface_count_default(api):
endpoint_result = api.devices.get_device_interface_count(
)
return endpoint_result
@pytest.mark.devices
def test_get_device_interface_count_default(api):
try:
assert is_valid_get_device_interface_count(
get_device_interface_count_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_add_device(obj):
json_schema_validate('jsd_4bb22af046fa8f08_v1_3_0').validate(obj)
return True
def add_device(api):
endpoint_result = api.devices.add_device(
active_validation=True,
cliTransport='string',
computeDevice=True,
enablePassword='string',
extendedDiscoveryInfo='string',
httpPassword='string',
httpPort='string',
httpSecure=True,
httpUserName='string',
ipAddress=['string'],
merakiOrgId=['string'],
netconfPort='string',
password='string',
payload=None,
serialNumber='string',
snmpAuthPassphrase='string',
snmpAuthProtocol='string',
snmpMode='string',
snmpPrivPassphrase='string',
snmpPrivProtocol='string',
snmpROCommunity='string',
snmpRWCommunity='string',
snmpRetry=0,
snmpTimeout=0,
snmpUserName='string',
snmpVersion='string',
type='COMPUTE_DEVICE',
updateMgmtIPaddressList=[{'existMgmtIpAddress': 'string', 'newMgmtIpAddress': 'string'}],
userName='string'
)
return endpoint_result
@pytest.mark.devices
def test_add_device(api):
assert is_valid_add_device(
add_device(api)
)
def add_device_default(api):
endpoint_result = api.devices.add_device(
active_validation=True,
cliTransport=None,
computeDevice=None,
enablePassword=None,
extendedDiscoveryInfo=None,
httpPassword=None,
httpPort=None,
httpSecure=None,
httpUserName=None,
ipAddress=None,
merakiOrgId=None,
netconfPort=None,
password=None,
payload=None,
serialNumber=None,
snmpAuthPassphrase=None,
snmpAuthProtocol=None,
snmpMode=None,
snmpPrivPassphrase=None,
snmpPrivProtocol=None,
snmpROCommunity=None,
snmpRWCommunity=None,
snmpRetry=None,
snmpTimeout=None,
snmpUserName=None,
snmpVersion=None,
type=None,
updateMgmtIPaddressList=None,
userName=None
)
return endpoint_result
@pytest.mark.devices
def test_add_device_default(api):
try:
assert is_valid_add_device(
add_device_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_interface_details(obj):
json_schema_validate('jsd_4eb56a614cc9a2d2_v1_3_0').validate(obj)
return True
def get_interface_details(api):
endpoint_result = api.devices.get_interface_details(
device_id='string',
name='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_interface_details(api):
assert is_valid_get_interface_details(
get_interface_details(api)
)
def get_interface_details_default(api):
endpoint_result = api.devices.get_interface_details(
device_id='string',
name=None
)
return endpoint_result
@pytest.mark.devices
def test_get_interface_details_default(api):
try:
assert is_valid_get_interface_details(
get_interface_details_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_device_interface_count_by_id(obj):
json_schema_validate('jsd_5b8639224cd88ea7_v1_3_0').validate(obj)
return True
def get_device_interface_count_by_id(api):
endpoint_result = api.devices.get_device_interface_count_by_id(
device_id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_device_interface_count_by_id(api):
assert is_valid_get_device_interface_count_by_id(
get_device_interface_count_by_id(api)
)
def get_device_interface_count_by_id_default(api):
endpoint_result = api.devices.get_device_interface_count_by_id(
device_id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_device_interface_count_by_id_default(api):
try:
assert is_valid_get_device_interface_count_by_id(
get_device_interface_count_by_id_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_device_count(obj):
json_schema_validate('jsd_5db21b8e43fab7d8_v1_3_0').validate(obj)
return True
def get_device_count(api):
endpoint_result = api.devices.get_device_count(
)
return endpoint_result
@pytest.mark.devices
def test_get_device_count(api):
assert is_valid_get_device_count(
get_device_count(api)
)
def get_device_count_default(api):
endpoint_result = api.devices.get_device_count(
)
return endpoint_result
@pytest.mark.devices
def test_get_device_count_default(api):
try:
assert is_valid_get_device_count(
get_device_count_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_ospf_interfaces(obj):
json_schema_validate('jsd_70ad397649e9b4d3_v1_3_0').validate(obj)
return True
def get_ospf_interfaces(api):
endpoint_result = api.devices.get_ospf_interfaces(
)
return endpoint_result
@pytest.mark.devices
def test_get_ospf_interfaces(api):
assert is_valid_get_ospf_interfaces(
get_ospf_interfaces(api)
)
def get_ospf_interfaces_default(api):
endpoint_result = api.devices.get_ospf_interfaces(
)
return endpoint_result
@pytest.mark.devices
def test_get_ospf_interfaces_default(api):
try:
assert is_valid_get_ospf_interfaces(
get_ospf_interfaces_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_polling_interval_by_id(obj):
json_schema_validate('jsd_82918a1b4d289c5c_v1_3_0').validate(obj)
return True
def get_polling_interval_by_id(api):
endpoint_result = api.devices.get_polling_interval_by_id(
id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_polling_interval_by_id(api):
assert is_valid_get_polling_interval_by_id(
get_polling_interval_by_id(api)
)
def get_polling_interval_by_id_default(api):
endpoint_result = api.devices.get_polling_interval_by_id(
id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_polling_interval_by_id_default(api):
try:
assert is_valid_get_polling_interval_by_id(
get_polling_interval_by_id_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_organization_list_for_meraki(obj):
json_schema_validate('jsd_84b37ae54c59ab28_v1_3_0').validate(obj)
return True
def get_organization_list_for_meraki(api):
endpoint_result = api.devices.get_organization_list_for_meraki(
id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_organization_list_for_meraki(api):
assert is_valid_get_organization_list_for_meraki(
get_organization_list_for_meraki(api)
)
def get_organization_list_for_meraki_default(api):
endpoint_result = api.devices.get_organization_list_for_meraki(
id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_organization_list_for_meraki_default(api):
try:
assert is_valid_get_organization_list_for_meraki(
get_organization_list_for_meraki_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_functional_capability_by_id(obj):
json_schema_validate('jsd_81bb4804405a8d2f_v1_3_0').validate(obj)
return True
def get_functional_capability_by_id(api):
endpoint_result = api.devices.get_functional_capability_by_id(
id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_functional_capability_by_id(api):
assert is_valid_get_functional_capability_by_id(
get_functional_capability_by_id(api)
)
def get_functional_capability_by_id_default(api):
endpoint_result = api.devices.get_functional_capability_by_id(
id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_functional_capability_by_id_default(api):
try:
assert is_valid_get_functional_capability_by_id(
get_functional_capability_by_id_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_isis_interfaces(obj):
json_schema_validate('jsd_84ad8b0e42cab48a_v1_3_0').validate(obj)
return True
def get_isis_interfaces(api):
endpoint_result = api.devices.get_isis_interfaces(
)
return endpoint_result
@pytest.mark.devices
def test_get_isis_interfaces(api):
assert is_valid_get_isis_interfaces(
get_isis_interfaces(api)
)
def get_isis_interfaces_default(api):
endpoint_result = api.devices.get_isis_interfaces(
)
return endpoint_result
@pytest.mark.devices
def test_get_isis_interfaces_default(api):
try:
assert is_valid_get_isis_interfaces(
get_isis_interfaces_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_device_config_by_id(obj):
json_schema_validate('jsd_84b33a9e480abcaf_v1_3_0').validate(obj)
return True
def get_device_config_by_id(api):
endpoint_result = api.devices.get_device_config_by_id(
network_device_id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_device_config_by_id(api):
assert is_valid_get_device_config_by_id(
get_device_config_by_id(api)
)
def get_device_config_by_id_default(api):
endpoint_result = api.devices.get_device_config_by_id(
network_device_id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_device_config_by_id_default(api):
try:
assert is_valid_get_device_config_by_id(
get_device_config_by_id_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_device_summary(obj):
json_schema_validate('jsd_819f9aa54feab7bf_v1_3_0').validate(obj)
return True
def get_device_summary(api):
endpoint_result = api.devices.get_device_summary(
id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_device_summary(api):
assert is_valid_get_device_summary(
get_device_summary(api)
)
def get_device_summary_default(api):
endpoint_result = api.devices.get_device_summary(
id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_device_summary_default(api):
try:
assert is_valid_get_device_summary(
get_device_summary_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_device_by_id(obj):
json_schema_validate('jsd_8fa8eb404a4a8d96_v1_3_0').validate(obj)
return True
def get_device_by_id(api):
endpoint_result = api.devices.get_device_by_id(
id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_device_by_id(api):
assert is_valid_get_device_by_id(
get_device_by_id(api)
)
def get_device_by_id_default(api):
endpoint_result = api.devices.get_device_by_id(
id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_device_by_id_default(api):
try:
assert is_valid_get_device_by_id(
get_device_by_id_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_interface_info_by_id(obj):
json_schema_validate('jsd_ba9dc85b4b8a9a17_v1_3_0').validate(obj)
return True
def get_interface_info_by_id(api):
endpoint_result = api.devices.get_interface_info_by_id(
device_id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_interface_info_by_id(api):
assert is_valid_get_interface_info_by_id(
get_interface_info_by_id(api)
)
def get_interface_info_by_id_default(api):
endpoint_result = api.devices.get_interface_info_by_id(
device_id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_interface_info_by_id_default(api):
try:
assert is_valid_get_interface_info_by_id(
get_interface_info_by_id_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_register_device_for_wsa(obj):
json_schema_validate('jsd_c9809b6744f8a502_v1_3_0').validate(obj)
return True
def register_device_for_wsa(api):
endpoint_result = api.devices.register_device_for_wsa(
macaddress='string',
serial_number='string'
)
return endpoint_result
@pytest.mark.devices
def test_register_device_for_wsa(api):
assert is_valid_register_device_for_wsa(
register_device_for_wsa(api)
)
def register_device_for_wsa_default(api):
endpoint_result = api.devices.register_device_for_wsa(
macaddress=None,
serial_number=None
)
return endpoint_result
@pytest.mark.devices
def test_register_device_for_wsa_default(api):
try:
assert is_valid_register_device_for_wsa(
register_device_for_wsa_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_update_device_role(obj):
json_schema_validate('jsd_b9855ad54ae98156_v1_3_0').validate(obj)
return True
def update_device_role(api):
endpoint_result = api.devices.update_device_role(
active_validation=True,
id='string',
payload=None,
role='string',
roleSource='string'
)
return endpoint_result
@pytest.mark.devices
def test_update_device_role(api):
assert is_valid_update_device_role(
update_device_role(api)
)
def update_device_role_default(api):
endpoint_result = api.devices.update_device_role(
active_validation=True,
id=None,
payload=None,
role=None,
roleSource=None
)
return endpoint_result
@pytest.mark.devices
def test_update_device_role_default(api):
try:
assert is_valid_update_device_role(
update_device_role_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_device_config_for_all_devices(obj):
json_schema_validate('jsd_b7bcaa084e2b90d0_v1_3_0').validate(obj)
return True
def get_device_config_for_all_devices(api):
endpoint_result = api.devices.get_device_config_for_all_devices(
)
return endpoint_result
@pytest.mark.devices
def test_get_device_config_for_all_devices(api):
assert is_valid_get_device_config_for_all_devices(
get_device_config_for_all_devices(api)
)
def get_device_config_for_all_devices_default(api):
endpoint_result = api.devices.get_device_config_for_all_devices(
)
return endpoint_result
@pytest.mark.devices
def test_get_device_config_for_all_devices_default(api):
try:
assert is_valid_get_device_config_for_all_devices(
get_device_config_for_all_devices_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_export_device_list(obj):
json_schema_validate('jsd_cd98780f4888a66d_v1_3_0').validate(obj)
return True
def export_device_list(api):
endpoint_result = api.devices.export_device_list(
active_validation=True,
deviceUuids=['string'],
id='string',
operationEnum='CREDENTIALDETAILS',
parameters=['string'],
password='string',
payload=None
)
return endpoint_result
@pytest.mark.devices
def test_export_device_list(api):
assert is_valid_export_device_list(
export_device_list(api)
)
def export_device_list_default(api):
endpoint_result = api.devices.export_device_list(
active_validation=True,
deviceUuids=None,
id=None,
operationEnum=None,
parameters=None,
password=None,
payload=None
)
return endpoint_result
@pytest.mark.devices
def test_export_device_list_default(api):
try:
assert is_valid_export_device_list(
export_device_list_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_interface_by_ip(obj):
json_schema_validate('jsd_cd8469e647caab0e_v1_3_0').validate(obj)
return True
def get_interface_by_ip(api):
endpoint_result = api.devices.get_interface_by_ip(
ip_address='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_interface_by_ip(api):
assert is_valid_get_interface_by_ip(
get_interface_by_ip(api)
)
def get_interface_by_ip_default(api):
endpoint_result = api.devices.get_interface_by_ip(
ip_address='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_interface_by_ip_default(api):
try:
assert is_valid_get_interface_by_ip(
get_interface_by_ip_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_network_device_by_ip(obj):
json_schema_validate('jsd_d0a4b88145aabb51_v1_3_0').validate(obj)
return True
def get_network_device_by_ip(api):
endpoint_result = api.devices.get_network_device_by_ip(
ip_address='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_network_device_by_ip(api):
assert is_valid_get_network_device_by_ip(
get_network_device_by_ip(api)
)
def get_network_device_by_ip_default(api):
endpoint_result = api.devices.get_network_device_by_ip(
ip_address='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_network_device_by_ip_default(api):
try:
assert is_valid_get_network_device_by_ip(
get_network_device_by_ip_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_device_config_count(obj):
json_schema_validate('jsd_888f585c49b88441_v1_3_0').validate(obj)
return True
def get_device_config_count(api):
endpoint_result = api.devices.get_device_config_count(
)
return endpoint_result
@pytest.mark.devices
def test_get_device_config_count(api):
assert is_valid_get_device_config_count(
get_device_config_count(api)
)
def get_device_config_count_default(api):
endpoint_result = api.devices.get_device_config_count(
)
return endpoint_result
@pytest.mark.devices
def test_get_device_config_count_default(api):
try:
assert is_valid_get_device_config_count(
get_device_config_count_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_device_by_serial_number(obj):
json_schema_validate('jsd_d888ab6d4d59a8c1_v1_3_0').validate(obj)
return True
def get_device_by_serial_number(api):
endpoint_result = api.devices.get_device_by_serial_number(
serial_number='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_device_by_serial_number(api):
assert is_valid_get_device_by_serial_number(
get_device_by_serial_number(api)
)
def get_device_by_serial_number_default(api):
endpoint_result = api.devices.get_device_by_serial_number(
serial_number='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_device_by_serial_number_default(api):
try:
assert is_valid_get_device_by_serial_number(
get_device_by_serial_number_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_all_interfaces(obj):
json_schema_validate('jsd_f5947a4c439a8bf0_v1_3_0').validate(obj)
return True
def get_all_interfaces(api):
endpoint_result = api.devices.get_all_interfaces(
)
return endpoint_result
@pytest.mark.devices
def test_get_all_interfaces(api):
assert is_valid_get_all_interfaces(
get_all_interfaces(api)
)
def get_all_interfaces_default(api):
endpoint_result = api.devices.get_all_interfaces(
)
return endpoint_result
@pytest.mark.devices
def test_get_all_interfaces_default(api):
try:
assert is_valid_get_all_interfaces(
get_all_interfaces_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_module_count(obj):
json_schema_validate('jsd_8db939744649a782_v1_3_0').validate(obj)
return True
def get_module_count(api):
endpoint_result = api.devices.get_module_count(
device_id='string',
name_list='value1,value2',
operational_state_code_list='value1,value2',
part_number_list='value1,value2',
vendor_equipment_type_list='value1,value2'
)
return endpoint_result
@pytest.mark.devices
def test_get_module_count(api):
assert is_valid_get_module_count(
get_module_count(api)
)
def get_module_count_default(api):
endpoint_result = api.devices.get_module_count(
device_id=None,
name_list=None,
operational_state_code_list=None,
part_number_list=None,
vendor_equipment_type_list=None
)
return endpoint_result
@pytest.mark.devices
def test_get_module_count_default(api):
try:
assert is_valid_get_module_count(
get_module_count_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_modules(obj):
json_schema_validate('jsd_eb8249e34f69b0f1_v1_3_0').validate(obj)
return True
def get_modules(api):
endpoint_result = api.devices.get_modules(
device_id='string',
limit='string',
name_list='value1,value2',
offset='string',
operational_state_code_list='value1,value2',
part_number_list='value1,value2',
vendor_equipment_type_list='value1,value2'
)
return endpoint_result
@pytest.mark.devices
def test_get_modules(api):
assert is_valid_get_modules(
get_modules(api)
)
def get_modules_default(api):
endpoint_result = api.devices.get_modules(
device_id=None,
limit=None,
name_list=None,
offset=None,
operational_state_code_list=None,
part_number_list=None,
vendor_equipment_type_list=None
)
return endpoint_result
@pytest.mark.devices
def test_get_modules_default(api):
try:
assert is_valid_get_modules(
get_modules_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_wireless_lan_controller_details_by_id(obj):
json_schema_validate('jsd_f6826a8e41bba242_v1_3_0').validate(obj)
return True
def get_wireless_lan_controller_details_by_id(api):
endpoint_result = api.devices.get_wireless_lan_controller_details_by_id(
id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_wireless_lan_controller_details_by_id(api):
assert is_valid_get_wireless_lan_controller_details_by_id(
get_wireless_lan_controller_details_by_id(api)
)
def get_wireless_lan_controller_details_by_id_default(api):
endpoint_result = api.devices.get_wireless_lan_controller_details_by_id(
id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_wireless_lan_controller_details_by_id_default(api):
try:
assert is_valid_get_wireless_lan_controller_details_by_id(
get_wireless_lan_controller_details_by_id_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_sync_devices(obj):
json_schema_validate('jsd_aeb9eb67460b92df_v1_3_0').validate(obj)
return True
def sync_devices(api):
endpoint_result = api.devices.sync_devices(
active_validation=True,
cliTransport='string',
computeDevice=True,
enablePassword='string',
extendedDiscoveryInfo='string',
httpPassword='string',
httpPort='string',
httpSecure=True,
httpUserName='string',
ipAddress=['string'],
merakiOrgId=['string'],
netconfPort='string',
password='string',
payload=None,
serialNumber='string',
snmpAuthPassphrase='string',
snmpAuthProtocol='string',
snmpMode='string',
snmpPrivPassphrase='string',
snmpPrivProtocol='string',
snmpROCommunity='string',
snmpRWCommunity='string',
snmpRetry=0,
snmpTimeout=0,
snmpUserName='string',
snmpVersion='string',
type='COMPUTE_DEVICE',
updateMgmtIPaddressList=[{'existMgmtIpAddress': 'string', 'newMgmtIpAddress': 'string'}],
userName='string'
)
return endpoint_result
@pytest.mark.devices
def test_sync_devices(api):
assert is_valid_sync_devices(
sync_devices(api)
)
def sync_devices_default(api):
endpoint_result = api.devices.sync_devices(
active_validation=True,
cliTransport=None,
computeDevice=None,
enablePassword=None,
extendedDiscoveryInfo=None,
httpPassword=None,
httpPort=None,
httpSecure=None,
httpUserName=None,
ipAddress=None,
merakiOrgId=None,
netconfPort=None,
password=None,
payload=None,
serialNumber=None,
snmpAuthPassphrase=None,
snmpAuthProtocol=None,
snmpMode=None,
snmpPrivPassphrase=None,
snmpPrivProtocol=None,
snmpROCommunity=None,
snmpRWCommunity=None,
snmpRetry=None,
snmpTimeout=None,
snmpUserName=None,
snmpVersion=None,
type=None,
updateMgmtIPaddressList=None,
userName=None
)
return endpoint_result
@pytest.mark.devices
def test_sync_devices_default(api):
try:
assert is_valid_sync_devices(
sync_devices_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_interface_by_id(obj):
json_schema_validate('jsd_b888792d43baba46_v1_3_0').validate(obj)
return True
def get_interface_by_id(api):
endpoint_result = api.devices.get_interface_by_id(
id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_interface_by_id(api):
assert is_valid_get_interface_by_id(
get_interface_by_id(api)
)
def get_interface_by_id_default(api):
endpoint_result = api.devices.get_interface_by_id(
id='string'
)
return endpoint_result
@pytest.mark.devices
def test_get_interface_by_id_default(api):
try:
assert is_valid_get_interface_by_id(
get_interface_by_id_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_functional_capability_for_devices(obj):
json_schema_validate('jsd_c3b3c9ef4e6b8a09_v1_3_0').validate(obj)
return True
def get_functional_capability_for_devices(api):
endpoint_result = api.devices.get_functional_capability_for_devices(
device_id='string',
function_name='value1,value2'
)
return endpoint_result
@pytest.mark.devices
def test_get_functional_capability_for_devices(api):
assert is_valid_get_functional_capability_for_devices(
get_functional_capability_for_devices(api)
)
def get_functional_capability_for_devices_default(api):
endpoint_result = api.devices.get_functional_capability_for_devices(
device_id=None,
function_name=None
)
return endpoint_result
@pytest.mark.devices
def test_get_functional_capability_for_devices_default(api):
try:
assert is_valid_get_functional_capability_for_devices(
get_functional_capability_for_devices_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_device_detail(obj):
json_schema_validate('jsd_89b2fb144f5bb09b_v1_3_0').validate(obj)
return True
def get_device_detail(api):
endpoint_result = api.devices.get_device_detail(
identifier='string',
search_by='string',
timestamp=0
)
return endpoint_result
@pytest.mark.devices
def test_get_device_detail(api):
assert is_valid_get_device_detail(
get_device_detail(api)
)
def get_device_detail_default(api):
endpoint_result = api.devices.get_device_detail(
identifier=None,
search_by=None,
timestamp=None
)
return endpoint_result
@pytest.mark.devices
def test_get_device_detail_default(api):
try:
assert is_valid_get_device_detail(
get_device_detail_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_network_device_by_pagination_range(obj):
json_schema_validate('jsd_f49548c54be8a3e2_v1_3_0').validate(obj)
return True
def get_network_device_by_pagination_range(api):
endpoint_result = api.devices.get_network_device_by_pagination_range(
records_to_return=0,
start_index=0
)
return endpoint_result
@pytest.mark.devices
def test_get_network_device_by_pagination_range(api):
assert is_valid_get_network_device_by_pagination_range(
get_network_device_by_pagination_range(api)
)
def get_network_device_by_pagination_range_default(api):
endpoint_result = api.devices.get_network_device_by_pagination_range(
records_to_return=0,
start_index=0
)
return endpoint_result
@pytest.mark.devices
def test_get_network_device_by_pagination_range_default(api):
try:
assert is_valid_get_network_device_by_pagination_range(
get_network_device_by_pagination_range_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_retrieves_all_network_devices(obj):
json_schema_validate('jsd_ffa748cc44e9a437_v1_3_0').validate(obj)
return True
def retrieves_all_network_devices(api):
endpoint_result = api.devices.retrieves_all_network_devices(
associated_wlc_ip='string',
collection_interval='string',
collection_status='string',
error_code='string',
family='string',
hostname='string',
limit='string',
mac_address='string',
management_ip_address='string',
offset='string',
platform_id='string',
reachability_failure_reason='string',
reachability_status='string',
role='string',
role_source='string',
serial_number='string',
series='string',
software_type='string',
software_version='string',
type='string',
up_time='string',
vrf_name='string'
)
return endpoint_result
@pytest.mark.devices
def test_retrieves_all_network_devices(api):
assert is_valid_retrieves_all_network_devices(
retrieves_all_network_devices(api)
)
def retrieves_all_network_devices_default(api):
endpoint_result = api.devices.retrieves_all_network_devices(
associated_wlc_ip=None,
collection_interval=None,
collection_status=None,
error_code=None,
family=None,
hostname=None,
limit=None,
mac_address=None,
management_ip_address=None,
offset=None,
platform_id=None,
reachability_failure_reason=None,
reachability_status=None,
role=None,
role_source=None,
serial_number=None,
series=None,
software_type=None,
software_version=None,
type=None,
up_time=None,
vrf_name=None
)
return endpoint_result
@pytest.mark.devices
def test_retrieves_all_network_devices_default(api):
try:
assert is_valid_retrieves_all_network_devices(
retrieves_all_network_devices_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
| 26.159909
| 97
| 0.717832
| 5,862
| 45,806
| 5.19362
| 0.054248
| 0.071736
| 0.030547
| 0.05124
| 0.881196
| 0.857021
| 0.829562
| 0.799901
| 0.765446
| 0.710133
| 0
| 0.016648
| 0.210562
| 45,806
| 1,750
| 98
| 26.174857
| 0.825281
| 0.024888
| 0
| 0.570129
| 0
| 0
| 0.079043
| 0.023579
| 0
| 0
| 0
| 0
| 0.059136
| 1
| 0.147839
| false
| 0.016679
| 0.002274
| 0
| 0.238817
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9148cd2a1e8537ffec6d90651ed4e957a922ee7e
| 11,695
|
py
|
Python
|
formwizard/tests/wizardtests/tests.py
|
djangsters/django-formwizard
|
7b35165f0340aae4e8302d5b05b0cb443f6c9904
|
[
"BSD-3-Clause"
] | 4
|
2015-02-05T00:12:55.000Z
|
2017-08-14T10:37:20.000Z
|
formwizard/tests/wizardtests/tests.py
|
djangsters/django-formwizard
|
7b35165f0340aae4e8302d5b05b0cb443f6c9904
|
[
"BSD-3-Clause"
] | 2
|
2017-05-18T20:21:03.000Z
|
2017-08-16T14:33:28.000Z
|
formwizard/tests/wizardtests/tests.py
|
djangsters/django-formwizard
|
7b35165f0340aae4e8302d5b05b0cb443f6c9904
|
[
"BSD-3-Clause"
] | 4
|
2015-01-20T00:19:22.000Z
|
2017-11-24T15:17:02.000Z
|
from __future__ import with_statement
import os
from django.test import TestCase
from django.conf import settings
from django.contrib.auth.models import User
import formwizard
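# Editor's comment: WizardTests is a mixin that holds the shared test logic but
# defines no wizard_url or step data. The concrete TestCase subclasses further
# down (SessionWizardTests, CookieWizardTests) supply those attributes for the
# session-backed and cookie-backed storage backends respectively.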
class WizardTests(object):
urls = 'formwizard.tests.wizardtests.urls'
def setUp(self):
self.testuser, created = User.objects.get_or_create(username='testuser1')
self.wizard_step_data[0]['form1-user'] = self.testuser.pk
def test_initial_call(self):
response = self.client.get(self.wizard_url)
wizard = response.context['wizard']
self.assertEqual(response.status_code, 200)
self.assertEqual(wizard['steps'].current, 'form1')
self.assertEqual(wizard['steps'].step0, 0)
self.assertEqual(wizard['steps'].step1, 1)
self.assertEqual(wizard['steps'].last, 'form4')
self.assertEqual(wizard['steps'].prev, None)
self.assertEqual(wizard['steps'].next, 'form2')
self.assertEqual(wizard['steps'].count, 4)
def test_form_post_error(self):
response = self.client.post(self.wizard_url, self.wizard_step_1_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
self.assertEqual(response.context['wizard']['form'].errors,
{'name': [u'This field is required.'],
'user': [u'This field is required.']})
def test_form_post_success(self):
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
wizard = response.context['wizard']
self.assertEqual(response.status_code, 200)
self.assertEqual(wizard['steps'].current, 'form2')
self.assertEqual(wizard['steps'].step0, 1)
self.assertEqual(wizard['steps'].prev, 'form1')
self.assertEqual(wizard['steps'].next, 'form3')
def test_form_stepback(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
response = self.client.post(self.wizard_url, {
'wizard_prev_step': response.context['wizard']['steps'].prev})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
def test_template_context(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
self.assertEqual(response.context.get('another_var', None), None)
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
self.assertEqual(response.context.get('another_var', None), True)
def test_form_finish(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
post_data = self.wizard_step_data[1]
post_data['form2-file1'] = open(__file__)
response = self.client.post(self.wizard_url, post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form3')
response = self.client.post(self.wizard_url, self.wizard_step_data[2])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form4')
response = self.client.post(self.wizard_url, self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
all_data = response.context['form_list']
self.assertEqual(all_data[1]['file1'].read(), open(__file__).read())
del all_data[1]['file1']
self.assertEqual(all_data, [
{'name': u'Pony', 'thirsty': True, 'user': self.testuser},
{'address1': u'123 Main St', 'address2': u'Djangoland'},
{'random_crap': u'blah blah'},
[{'random_crap': u'blah blah'},
{'random_crap': u'blah blah'}]])
def test_cleaned_data(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
post_data = self.wizard_step_data[1]
post_data['form2-file1'] = open(__file__)
response = self.client.post(self.wizard_url, post_data)
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[2])
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
all_data = response.context['all_cleaned_data']
self.assertEqual(all_data['file1'].read(), open(__file__).read())
del all_data['file1']
self.assertEqual(all_data, {
'name': u'Pony', 'thirsty': True, 'user': self.testuser,
'address1': u'123 Main St', 'address2': u'Djangoland',
'random_crap': u'blah blah', 'formset-form4': [
{'random_crap': u'blah blah'},
{'random_crap': u'blah blah'}]})
def test_manipulated_data(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
post_data = self.wizard_step_data[1]
post_data['form2-file1'] = open(__file__)
response = self.client.post(self.wizard_url, post_data)
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[2])
self.assertEqual(response.status_code, 200)
self.client.cookies.pop('sessionid', None)
self.client.cookies.pop('wizard_cookie_contact_wizard', None)
response = self.client.post(self.wizard_url, self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
def test_form_refresh(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
post_data = self.wizard_step_data[1]
post_data['form2-file1'] = open(__file__)
response = self.client.post(self.wizard_url, post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form3')
response = self.client.post(self.wizard_url, self.wizard_step_data[2])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form4')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
response = self.client.post(self.wizard_url, self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
class SessionWizardTests(WizardTests, TestCase):
wizard_url = '/wiz_session/'
wizard_step_1_data = {
'session_contact_wizard-current_step': 'form1',
}
wizard_step_data = (
{
'form1-name': 'Pony',
'form1-thirsty': '2',
'session_contact_wizard-current_step': 'form1',
},
{
'form2-address1': '123 Main St',
'form2-address2': 'Djangoland',
'session_contact_wizard-current_step': 'form2',
},
{
'form3-random_crap': 'blah blah',
'session_contact_wizard-current_step': 'form3',
},
{
'form4-INITIAL_FORMS': '0',
'form4-TOTAL_FORMS': '2',
'form4-MAX_NUM_FORMS': '0',
'form4-0-random_crap': 'blah blah',
'form4-1-random_crap': 'blah blah',
'session_contact_wizard-current_step': 'form4',
}
)
class CookieWizardTests(WizardTests, TestCase):
wizard_url = '/wiz_cookie/'
wizard_step_1_data = {
'cookie_contact_wizard-current_step': 'form1',
}
wizard_step_data = (
{
'form1-name': 'Pony',
'form1-thirsty': '2',
'cookie_contact_wizard-current_step': 'form1',
},
{
'form2-address1': '123 Main St',
'form2-address2': 'Djangoland',
'cookie_contact_wizard-current_step': 'form2',
},
{
'form3-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form3',
},
{
'form4-INITIAL_FORMS': '0',
'form4-TOTAL_FORMS': '2',
'form4-MAX_NUM_FORMS': '0',
'form4-0-random_crap': 'blah blah',
'form4-1-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form4',
}
)
class WizardTestKwargs(TestCase):
wizard_url = '/wiz_other_template/'
wizard_step_1_data = {
'cookie_contact_wizard-current_step': 'form1',
}
wizard_step_data = (
{
'form1-name': 'Pony',
'form1-thirsty': '2',
'cookie_contact_wizard-current_step': 'form1',
},
{
'form2-address1': '123 Main St',
'form2-address2': 'Djangoland',
'cookie_contact_wizard-current_step': 'form2',
},
{
'form3-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form3',
},
{
'form4-INITIAL_FORMS': '0',
'form4-TOTAL_FORMS': '2',
'form4-MAX_NUM_FORMS': '0',
'form4-0-random_crap': 'blah blah',
'form4-1-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form4',
}
)
urls = 'formwizard.tests.wizardtests.urls'
def setUp(self):
self.testuser, created = User.objects.get_or_create(username='testuser1')
self.wizard_step_data[0]['form1-user'] = self.testuser.pk
def test_template(self):
templates = os.path.join(os.path.dirname(__file__), 'templates')
with self.settings(
TEMPLATE_DIRS=list(settings.TEMPLATE_DIRS) + [templates]):
response = self.client.get(self.wizard_url)
self.assertTemplateUsed(response, 'other_wizard_form.html')
| 41.035088
| 81
| 0.633604
| 1,359
| 11,695
| 5.234731
| 0.091979
| 0.137054
| 0.161653
| 0.122294
| 0.879393
| 0.832865
| 0.823166
| 0.823166
| 0.794771
| 0.789429
| 0
| 0.028054
| 0.225823
| 11,695
| 284
| 82
| 41.179577
| 0.757676
| 0
| 0
| 0.59751
| 0
| 0
| 0.194459
| 0.053959
| 0
| 0
| 0
| 0
| 0.273859
| 1
| 0.049793
| false
| 0
| 0.024896
| 0
| 0.136929
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
914c36194321c929b91469d5d4a236a46c7f5471
| 4,212
|
py
|
Python
|
tests/Python/Dynamic/PyATKDynamic_swellgain_test.py
|
D-J-Roberts/AudioTK
|
accf009d7238f32702eb1d5ee23c5148fc68e3bd
|
[
"BSD-3-Clause"
] | 249
|
2015-01-05T13:36:26.000Z
|
2022-03-15T18:47:46.000Z
|
tests/Python/Dynamic/PyATKDynamic_swellgain_test.py
|
D-J-Roberts/AudioTK
|
accf009d7238f32702eb1d5ee23c5148fc68e3bd
|
[
"BSD-3-Clause"
] | 22
|
2015-07-28T15:20:24.000Z
|
2020-07-11T14:18:19.000Z
|
tests/Python/Dynamic/PyATKDynamic_swellgain_test.py
|
D-J-Roberts/AudioTK
|
accf009d7238f32702eb1d5ee23c5148fc68e3bd
|
[
"BSD-3-Clause"
] | 48
|
2015-08-15T12:08:13.000Z
|
2021-04-07T02:33:07.000Z
|
#!/usr/bin/env python
from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter
from ATK.Dynamic import DoubleGainCompressorFilter, DoubleGainSwellFilter
from ATK.Tools import DoubleApplyGainFilter
sample_rate = 96000
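# Editor's comment: both helpers below build the same ATK processing graph --
# the squared input drives a gain filter (compressor in filter(), swell in
# colored_filter()), and DoubleApplyGainFilter applies that gain back onto the
# original signal before it is written to the output buffer.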
def filter(input, ratio=4, threshold=1, softness=1):
import numpy as np
output = np.zeros(input.shape, dtype=np.float64)
input2 = input**2
in2filter = DoubleInPointerFilter(input2, False)
in2filter.input_sampling_rate = sample_rate
infilter = DoubleInPointerFilter(input, False)
infilter.input_sampling_rate = sample_rate
gainfilter = DoubleGainCompressorFilter(1)
gainfilter.input_sampling_rate = sample_rate
gainfilter.set_input_port(0, in2filter, 0)
gainfilter.threshold = threshold
gainfilter.ratio = ratio
gainfilter.softness = softness
applygainfilter = DoubleApplyGainFilter(1)
applygainfilter.input_sampling_rate = sample_rate
applygainfilter.set_input_port(0, gainfilter, 0)
applygainfilter.set_input_port(1, infilter, 0)
outfilter = DoubleOutPointerFilter(output, False)
outfilter.input_sampling_rate = sample_rate
outfilter.set_input_port(0, applygainfilter, 0)
outfilter.process(input.shape[1])
return output
def colored_filter(input, ratio=4, threshold=1, softness=1):
import numpy as np
output = np.zeros(input.shape, dtype=np.float64)
input2 = input**2
in2filter = DoubleInPointerFilter(input2, False)
in2filter.input_sampling_rate = sample_rate
infilter = DoubleInPointerFilter(input, False)
infilter.input_sampling_rate = sample_rate
gainfilter = DoubleGainSwellFilter(1)
gainfilter.input_sampling_rate = sample_rate
gainfilter.set_input_port(0, in2filter, 0)
gainfilter.threshold = threshold
gainfilter.ratio = ratio
gainfilter.softness = softness
applygainfilter = DoubleApplyGainFilter(1)
applygainfilter.input_sampling_rate = sample_rate
applygainfilter.set_input_port(0, gainfilter, 0)
applygainfilter.set_input_port(1, infilter, 0)
outfilter = DoubleOutPointerFilter(output, False)
outfilter.input_sampling_rate = sample_rate
outfilter.set_input_port(0, applygainfilter, 0)
outfilter.process(input.shape[1])
return output
def swell_test():
import numpy as np
from numpy.testing import assert_almost_equal
import os
dirname = os.path.dirname(__file__)
x = np.fromfile(dirname + os.sep + "input_swellgain.dat", dtype=np.float64).reshape(1, -1)
ref = np.fromfile(dirname + os.sep + "output_swellgain.dat", dtype=np.float64).reshape(1, -1)
out = filter(x, 1, 1, 1)
assert_almost_equal(out, ref)
def swell_1_1_test():
import numpy as np
from numpy.testing import assert_almost_equal
import os
dirname = os.path.dirname(__file__)
x = np.fromfile(dirname + os.sep + "input_swellgain.dat", dtype=np.float64).reshape(1, -1)
ref = np.fromfile(dirname + os.sep + "output_swellgain_1_1.dat", dtype=np.float64).reshape(1, -1)
out = colored_filter(x, 1, 1, 1)
assert_almost_equal(out, ref)
def swell_2_1_test():
import numpy as np
from numpy.testing import assert_almost_equal
import os
dirname = os.path.dirname(__file__)
x = np.fromfile(dirname + os.sep + "input_swellgain.dat", dtype=np.float64).reshape(1, -1)
ref = np.fromfile(dirname + os.sep + "output_swellgain_2_1.dat", dtype=np.float64).reshape(1, -1)
out = colored_filter(x, 2, 1, 1)
assert_almost_equal(out, ref)
if __name__ == "__main__":
import numpy as np
import matplotlib.pyplot as plt
size = 1000
x = np.arange(10, size, dtype=np.float64).reshape(1, -1) / 100
x.tofile("input_swellgain.dat")
out_1_1_1 = filter(x, 1, 1, 1)
out_1_1_1.tofile("output_swellgain.dat")
max_out_1_1 = colored_filter(x, 1, 1, 1)
max_out_1_1.tofile("output_swellgain_1_1.dat")
max_out_2_1 = colored_filter(x, 2, 1, 1)
max_out_2_1.tofile("output_swellgain_2_1.dat")
plt.figure()
plt.loglog(x[0], out_1_1_1[0], label="compressor, ratio(1), threshold(1), softness(1)")
plt.loglog(x[0], max_out_1_1[0], label="swell, ratio(1), threshold(1), softness(1)")
plt.loglog(x[0], max_out_2_1[0], label="swell, ratio(2), threshold(1), softness(1)")
plt.title("Swell gain")
plt.legend(loc=4)
plt.grid()
plt.show()
| 31.909091
| 99
| 0.746676
| 606
| 4,212
| 4.965347
| 0.148515
| 0.019276
| 0.056497
| 0.076437
| 0.834829
| 0.790296
| 0.77102
| 0.762712
| 0.753074
| 0.753074
| 0
| 0.042242
| 0.140076
| 4,212
| 131
| 100
| 32.152672
| 0.788515
| 0.004748
| 0
| 0.628866
| 0
| 0
| 0.086137
| 0.022906
| 0
| 0
| 0
| 0
| 0.061856
| 1
| 0.051546
| false
| 0
| 0.164948
| 0
| 0.237113
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e671a756af30787a22d288d1457e85890664447d
| 151
|
py
|
Python
|
Udemy/GeekUniversity/secao_4/ex8_conversor_kelvin_p_celsuis.py
|
SandboxGTASA/Python-1
|
bbb5f8bdf7d5110528e457b2a9ebdb2d67e40805
|
[
"MIT"
] | null | null | null |
Udemy/GeekUniversity/secao_4/ex8_conversor_kelvin_p_celsuis.py
|
SandboxGTASA/Python-1
|
bbb5f8bdf7d5110528e457b2a9ebdb2d67e40805
|
[
"MIT"
] | null | null | null |
Udemy/GeekUniversity/secao_4/ex8_conversor_kelvin_p_celsuis.py
|
SandboxGTASA/Python-1
|
bbb5f8bdf7d5110528e457b2a9ebdb2d67e40805
|
[
"MIT"
] | null | null | null |
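# Editor's comment: Kelvin-to-Celsius conversion, C = K - 273.15.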
kelvin = float(input('Enter the temperature in Kelvin: '))
c = kelvin - 273.15
print(f'The temperature in Kelvin converted to Celsius is: {c}')
| 25.166667
| 67
| 0.708609
| 25
| 151
| 4.28
| 0.72
| 0.224299
| 0.261682
| 0.373832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0.172185
| 151
| 5
| 68
| 30.2
| 0.816
| 0
| 0
| 0
| 0
| 0
| 0.609272
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e679bb586817f3b04e1a1806bc2b851af82b2dd0
| 124
|
py
|
Python
|
mapek_framework/src/test_mapek_framework/test_managed_system.py
|
imcatta/ros-mapek-framework
|
55e5aa299d322df4edea9b5898e969add122c445
|
[
"MIT"
] | 1
|
2019-08-18T17:36:42.000Z
|
2019-08-18T17:36:42.000Z
|
mapek_framework/src/test_mapek_framework/test_managed_system.py
|
imcatta/ros-mapek-framework
|
55e5aa299d322df4edea9b5898e969add122c445
|
[
"MIT"
] | 4
|
2019-02-01T13:07:49.000Z
|
2019-02-04T14:10:46.000Z
|
mapek_framework/src/test_mapek_framework/test_managed_system.py
|
imcatta/ros-mapek-framework
|
55e5aa299d322df4edea9b5898e969add122c445
|
[
"MIT"
] | 1
|
2019-09-06T13:03:51.000Z
|
2019-09-06T13:03:51.000Z
|
import unittest
from mapek_framework import ManagedSystem
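# Editor's comment: placeholder test case -- it only verifies that ManagedSystem
# imports and the suite collects; no behavior is asserted yet.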
class ManagedSystemTestCase(unittest.TestCase):
pass
| 20.666667
| 47
| 0.790323
| 12
| 124
| 8.083333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177419
| 124
| 6
| 48
| 20.666667
| 0.95098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
e6956c35a56722f2ba4aac72cf5966b1d268d604
| 200
|
py
|
Python
|
webapp/manage.py
|
zhiwehu/IBookmark
|
b416f14f2b7ede4f38a00f386c2cdac01cbd740f
|
[
"Apache-2.0"
] | 1
|
2020-04-01T11:11:37.000Z
|
2020-04-01T11:11:37.000Z
|
webapp/manage.py
|
zhiwehu/IBookmark
|
b416f14f2b7ede4f38a00f386c2cdac01cbd740f
|
[
"Apache-2.0"
] | null | null | null |
webapp/manage.py
|
zhiwehu/IBookmark
|
b416f14f2b7ede4f38a00f386c2cdac01cbd740f
|
[
"Apache-2.0"
] | 2
|
2019-10-04T06:00:32.000Z
|
2021-02-03T08:08:27.000Z
|
#!/usr/bin/env python
from django.core.management import execute_from_command_line
import environment
environment.setup_environ(__file__)
if __name__ == "__main__":
execute_from_command_line()
| 20
| 60
| 0.81
| 26
| 200
| 5.5
| 0.730769
| 0.153846
| 0.251748
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105
| 200
| 9
| 61
| 22.222222
| 0.798883
| 0.1
| 0
| 0
| 0
| 0
| 0.044693
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e69c9a56157705b84f1e8585b89a2f87f6564b96
| 3,211
|
py
|
Python
|
tests/test_backoff.py
|
alexferl/justbackoff
|
8d99124122e744b1e6a721742c2bcc96b7917993
|
[
"MIT"
] | null | null | null |
tests/test_backoff.py
|
alexferl/justbackoff
|
8d99124122e744b1e6a721742c2bcc96b7917993
|
[
"MIT"
] | null | null | null |
tests/test_backoff.py
|
alexferl/justbackoff
|
8d99124122e744b1e6a721742c2bcc96b7917993
|
[
"MIT"
] | null | null | null |
import unittest
from justbackoff import Backoff, to_ms, to_seconds
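# Editor's comment: the Backoff object under test yields an exponentially
# growing delay -- duration() starts at min_ms and multiplies by `factor` on
# each call, reset() returns it to the minimum, and jitter=True keeps each
# duration between the configured minimum and the un-jittered value, which is
# what assert_between checks in test_jitter.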
class CustomAssertions:
    @staticmethod
    def assert_between(actual: float, low: float, high: float):
        if actual < low:
            raise AssertionError("Got {}, expecting >= {}".format(actual, low))
        if actual > high:
            msg = "Got {}, expecting <= {}".format(actual, high)
            raise AssertionError(msg)


class TestBackoff(unittest.TestCase, CustomAssertions):
    def setUp(self):
        self.b = Backoff(min_ms=100.0, max_ms=10000.0, factor=2.0)

    def test_defaults(self):
        self.assertEqual(self.b.duration(), to_seconds(100.0))
        self.assertEqual(self.b.duration(), to_seconds(200.0))
        self.assertEqual(self.b.duration(), to_seconds(400.0))
        self.b.reset()
        self.assertEqual(self.b.duration(), to_seconds(100.0))

    def test_factor(self):
        b = Backoff(min_ms=100, max_ms=10000, factor=1.5)
        self.assertEqual(b.duration(), to_seconds(100.0))
        self.assertEqual(b.duration(), to_seconds(150.0))
        self.assertEqual(b.duration(), to_seconds(225.0))
        b.reset()
        self.assertEqual(b.duration(), to_seconds(100.0))

    def test_for_attempt(self):
        self.assertEqual(self.b.for_attempt(0), to_seconds(100.0))
        self.assertEqual(self.b.for_attempt(1), to_seconds(200.0))
        self.assertEqual(self.b.for_attempt(2), to_seconds(400.0))
        self.b.reset()
        self.assertEqual(self.b.for_attempt(0), to_seconds(100.0))

    def test_get_attempt(self):
        self.assertEqual(self.b.attempt(), 0)
        self.assertEqual(self.b.duration(), to_seconds(100.0))
        self.assertEqual(self.b.attempt(), 1)
        self.assertEqual(self.b.duration(), to_seconds(200.0))
        self.assertEqual(self.b.attempt(), 2)
        self.assertEqual(self.b.duration(), to_seconds(400.0))
        self.assertEqual(self.b.attempt(), 3)
        self.b.reset()
        self.assertEqual(self.b.attempt(), 0)
        self.assertEqual(self.b.duration(), to_seconds(100.0))
        self.assertEqual(self.b.attempt(), 1)

    def test_jitter(self):
        b = Backoff(min_ms=100.0, max_ms=10000.0, factor=2.0, jitter=True)
        self.assertEqual(b.duration(), to_seconds(100.0))
        self.assert_between(b.duration(), to_seconds(100.0), to_seconds(200.0))
        self.assert_between(b.duration(), to_seconds(100.0), to_seconds(400.0))
        b.reset()
        self.assertEqual(b.duration(), to_seconds(100.0))

    def test_integers(self):
        b = Backoff(min_ms=100, max_ms=10000, factor=2)
        self.assertEqual(b.duration(), to_seconds(100.0))
        self.assertEqual(b.duration(), to_seconds(200.0))
        self.assertEqual(b.duration(), to_seconds(400.0))
        b.reset()
        self.assertEqual(b.duration(), to_seconds(100.0))

    def test_to_ms(self):
        self.assertEqual(10000, to_ms(10.0))

    def test_min_bigger_than_max(self):
        b = Backoff(min_ms=10000.0, max_ms=1000.0, factor=2)
        self.assertEqual(b.duration(), 1.0)
        self.assertEqual(b.duration(), 1.0)
        self.assertEqual(b.duration(), 1.0)
        b.reset()
        self.assertEqual(b.duration(), 1.0)
| 37.337209
| 79
| 0.63843
| 458
| 3,211
| 4.344978
| 0.128821
| 0.248744
| 0.110553
| 0.180905
| 0.769849
| 0.757789
| 0.723116
| 0.671357
| 0.631658
| 0.603015
| 0
| 0.072492
| 0.205232
| 3,211
| 85
| 80
| 37.776471
| 0.707288
| 0
| 0
| 0.462687
| 0
| 0
| 0.014326
| 0
| 0
| 0
| 0
| 0
| 0.597015
| 1
| 0.149254
| false
| 0
| 0.029851
| 0
| 0.208955
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e6b6b25cb2f1a9b15538ae2b6015c734c94717c7
| 10,335
|
py
|
Python
|
tests/pytests/unit/states/test_win_network.py
|
waynegemmell/salt
|
88056db3589cccab8956c2ae4f9b733acce89461
|
[
"Apache-2.0"
] | 9,425
|
2015-01-01T05:59:24.000Z
|
2022-03-31T20:44:05.000Z
|
tests/pytests/unit/states/test_win_network.py
|
waynegemmell/salt
|
88056db3589cccab8956c2ae4f9b733acce89461
|
[
"Apache-2.0"
] | 33,507
|
2015-01-01T00:19:56.000Z
|
2022-03-31T23:48:20.000Z
|
tests/pytests/unit/states/test_win_network.py
|
waynegemmell/salt
|
88056db3589cccab8956c2ae4f9b733acce89461
|
[
"Apache-2.0"
] | 5,810
|
2015-01-01T19:11:45.000Z
|
2022-03-31T02:37:20.000Z
|
"""
:codeauthor: Rahul Handay <rahulha@saltstack.com>
"""
import pytest
import salt.states.win_network as win_network
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {win_network: {}}
def test_managed_missing_parameters():
"""
Test to ensure that the named interface is configured properly.
"""
ret = {
"name": "salt",
"changes": {},
"result": False,
"comment": (
"dns_proto must be one of the following: static, dhcp\n"
"ip_proto must be one of the following: static, dhcp"
),
}
assert win_network.managed("salt") == ret
def test_managed_static_enabled_false():
ret = {
"name": "salt",
"changes": {},
"result": True,
"comment": "Interface 'salt' is up to date (already disabled)",
}
mock_false = MagicMock(return_value=False)
with patch.dict(win_network.__salt__, {"ip.is_enabled": mock_false}):
assert (
win_network.managed(
"salt", dns_proto="static", ip_proto="static", enabled=False
)
== ret
)
def test_managed_test_true():
ret = {
"name": "salt",
"changes": {},
"result": False,
"comment": "Failed to enable interface 'salt' to make changes",
}
mock_false = MagicMock(return_value=False)
with patch.dict(
win_network.__salt__, {"ip.is_enabled": mock_false, "ip.enable": mock_false}
), patch.dict(win_network.__opts__, {"test": False}):
assert win_network.managed("salt", dns_proto="static", ip_proto="static") == ret
def test_managed_validate_errors():
ret = {
"name": "salt",
"changes": {},
"result": False,
"comment": (
"The following SLS configuration errors were "
"detected:\n"
"- First Error\n"
"- Second Error"
),
}
mock_true = MagicMock(return_value=True)
mock_validate = MagicMock(return_value=["First Error", "Second Error"])
with patch.dict(win_network.__salt__, {"ip.is_enabled": mock_true}), patch.object(
win_network, "_validate", mock_validate
):
assert win_network.managed("salt", dns_proto="static", ip_proto="static") == ret
def test_managed_get_current_config_failed():
ret = {
"name": "salt",
"changes": {},
"result": False,
"comment": "Unable to get current configuration for interface 'salt'",
}
mock_true = MagicMock(return_value=True)
mock_false = MagicMock(return_value=False)
mock_validate = MagicMock(return_value=[])
with patch.dict(
win_network.__salt__,
{"ip.is_enabled": mock_true, "ip.get_interface": mock_false},
), patch.object(win_network, "_validate", mock_validate):
assert win_network.managed("salt", dns_proto="dhcp", ip_proto="dhcp") == ret
def test_managed_test_true_no_changes():
ret = {
"name": "salt",
"changes": {},
"result": True,
"comment": "Interface 'salt' is up to date",
}
mock_true = MagicMock(return_value=True)
mock_validate = MagicMock(return_value=[])
mock_get_int = MagicMock(
return_value={
"DHCP enabled": "yes",
"DNS servers configured through DHCP": "192.168.0.10",
}
)
with patch.dict(
win_network.__salt__,
{"ip.is_enabled": mock_true, "ip.get_interface": mock_get_int},
), patch.dict(win_network.__opts__, {"test": True}), patch.object(
win_network, "_validate", mock_validate
):
assert win_network.managed("salt", dns_proto="dhcp", ip_proto="dhcp") == ret
def test_managed_test_true_changes():
ret = {
"name": "salt",
"changes": {},
"result": None,
"comment": (
"The following changes will be made to interface "
"'salt':\n"
"- DNS protocol will be changed to: dhcp"
),
}
mock_true = MagicMock(return_value=True)
mock_validate = MagicMock(return_value=[])
mock_get_int = MagicMock(
return_value={
"DHCP enabled": "no",
"Statically Configured DNS Servers": "192.168.0.10",
}
)
with patch.dict(
win_network.__salt__,
{"ip.is_enabled": mock_true, "ip.get_interface": mock_get_int},
), patch.dict(win_network.__opts__, {"test": True}), patch.object(
win_network, "_validate", mock_validate
):
assert win_network.managed("salt", dns_proto="dhcp", ip_proto="dhcp") == ret
def test_managed_failed():
ret = {
"name": "salt",
"changes": {},
"result": False,
"comment": "Failed to set desired configuration settings for interface 'salt'",
}
mock_true = MagicMock(return_value=True)
mock_validate = MagicMock(return_value=[])
mock_get_int = MagicMock(
return_value={
"DHCP enabled": "no",
"Statically Configured DNS Servers": "192.168.0.10",
}
)
with patch.dict(
win_network.__salt__,
{
"ip.is_enabled": mock_true,
"ip.get_interface": mock_get_int,
"ip.set_dhcp_dns": mock_true,
"ip.set_dhcp_ip": mock_true,
},
), patch.dict(win_network.__opts__, {"test": False}), patch.object(
win_network, "_validate", mock_validate
):
assert win_network.managed("salt", dns_proto="dhcp", ip_proto="dhcp") == ret
def test_managed():
ret = {
"name": "salt",
"changes": {
"DHCP enabled": {"new": "yes", "old": "no"},
"DNS servers configured through DHCP": {"new": "192.168.0.10", "old": ""},
"Statically Configured DNS Servers": {"new": "", "old": "192.168.0.10"},
},
"result": True,
"comment": "Successfully updated configuration for interface 'salt'",
}
mock_true = MagicMock(return_value=True)
mock_validate = MagicMock(return_value=[])
mock_get_int = MagicMock(
side_effect=[
{
"DHCP enabled": "no",
"Statically Configured DNS Servers": "192.168.0.10",
},
{
"DHCP enabled": "yes",
"DNS servers configured through DHCP": "192.168.0.10",
},
]
)
with patch.dict(
win_network.__salt__,
{
"ip.is_enabled": mock_true,
"ip.get_interface": mock_get_int,
"ip.set_dhcp_dns": mock_true,
"ip.set_dhcp_ip": mock_true,
},
), patch.dict(win_network.__opts__, {"test": False}), patch.object(
win_network, "_validate", mock_validate
):
assert win_network.managed("salt", dns_proto="dhcp", ip_proto="dhcp") == ret
def test_managed_static_dns_clear():
expected = {
"name": "salt",
"changes": {
"Statically Configured DNS Servers": {"new": "None", "old": "192.168.0.10"}
},
"result": True,
"comment": "Successfully updated configuration for interface 'salt'",
}
mock_true = MagicMock(return_value=True)
mock_validate = MagicMock(return_value=[])
mock_get_int = MagicMock(
side_effect=[
{
"DHCP enabled": "no",
"Statically Configured DNS Servers": "192.168.0.10",
},
{"DHCP enabled": "no", "Statically Configured DNS Servers": "None"},
]
)
with patch.dict(
win_network.__salt__,
{
"ip.is_enabled": mock_true,
"ip.get_interface": mock_get_int,
"ip.set_static_dns": mock_true,
},
), patch.dict(win_network.__opts__, {"test": False}), patch.object(
win_network, "_validate", mock_validate
):
ret = win_network.managed(
"salt", dns_proto="static", dns_servers=[], ip_proto="dhcp"
)
assert ret == expected
def test_managed_static_dns():
expected = {
"name": "salt",
"changes": {
"Statically Configured DNS Servers": {"new": "192.168.0.10", "old": "None"}
},
"result": True,
"comment": "Successfully updated configuration for interface 'salt'",
}
mock_true = MagicMock(return_value=True)
mock_validate = MagicMock(return_value=[])
mock_get_int = MagicMock(
side_effect=[
{"DHCP enabled": "no", "Statically Configured DNS Servers": "None"},
{
"DHCP enabled": "no",
"Statically Configured DNS Servers": "192.168.0.10",
},
]
)
with patch.dict(
win_network.__salt__,
{
"ip.is_enabled": mock_true,
"ip.get_interface": mock_get_int,
"ip.set_static_dns": mock_true,
},
), patch.dict(win_network.__opts__, {"test": False}), patch.object(
win_network, "_validate", mock_validate
):
ret = win_network.managed(
"salt",
dns_proto="static",
dns_servers=["192.168.0.10"],
ip_proto="dhcp",
)
assert ret == expected
def test_managed_static_dns_no_action():
expected = {
"name": "salt",
"changes": {},
"result": True,
"comment": "Interface 'salt' is up to date",
}
mock_true = MagicMock(return_value=True)
mock_validate = MagicMock(return_value=[])
mock_get_int = MagicMock(
return_value={
"DHCP enabled": "no",
"Statically Configured DNS Servers": "192.168.0.10",
}
)
with patch.dict(
win_network.__salt__,
{
"ip.is_enabled": mock_true,
"ip.get_interface": mock_get_int,
"ip.set_static_dns": mock_true,
},
), patch.dict(win_network.__opts__, {"test": False}), patch.object(
win_network, "_validate", mock_validate
):
# Don't pass dns_servers
ret = win_network.managed("salt", dns_proto="static", ip_proto="dhcp")
assert ret == expected
# Pass dns_servers=None
ret = win_network.managed(
"salt", dns_proto="static", dns_servers=None, ip_proto="dhcp"
)
assert ret == expected
| 31.413374
| 88
| 0.568553
| 1,137
| 10,335
| 4.866315
| 0.100264
| 0.079523
| 0.090367
| 0.065245
| 0.868426
| 0.840412
| 0.808241
| 0.790349
| 0.773902
| 0.716429
| 0
| 0.016019
| 0.293275
| 10,335
| 328
| 89
| 31.509146
| 0.741512
| 0.015385
| 0
| 0.628866
| 0
| 0
| 0.254927
| 0
| 0
| 0
| 0
| 0
| 0.044674
| 1
| 0.044674
| false
| 0
| 0.010309
| 0.003436
| 0.058419
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e6c2807a6d162830d87e51e42c2b02735337f341
| 55
|
py
|
Python
|
test.py
|
RealityAbb/panSky
|
fadf7063094f809f679d0bcaafbd161054b6b63b
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
RealityAbb/panSky
|
fadf7063094f809f679d0bcaafbd161054b6b63b
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
RealityAbb/panSky
|
fadf7063094f809f679d0bcaafbd161054b6b63b
|
[
"Apache-2.0"
] | null | null | null |
from CTFd.models import get_id
print(get_id("1234567"))
| 27.5
| 30
| 0.8
| 10
| 55
| 4.2
| 0.8
| 0.238095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 0.072727
| 55
| 2
| 31
| 27.5
| 0.686275
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
fc3895f4c5e42de64e0861e83b74e6a6914e1a8a
| 32
|
py
|
Python
|
nglview/theme/__init__.py
|
tovrstra/nglview
|
12ab36c78dffc04bc2dde1f4048bc61ca75d33a5
|
[
"MIT"
] | 161
|
2020-07-28T14:05:57.000Z
|
2022-03-31T08:38:06.000Z
|
nglview/theme/__init__.py
|
tovrstra/nglview
|
12ab36c78dffc04bc2dde1f4048bc61ca75d33a5
|
[
"MIT"
] | 123
|
2020-07-27T15:02:27.000Z
|
2022-03-30T18:31:51.000Z
|
nglview/theme/__init__.py
|
tovrstra/nglview
|
12ab36c78dffc04bc2dde1f4048bc61ca75d33a5
|
[
"MIT"
] | 42
|
2020-07-28T09:50:06.000Z
|
2022-03-11T18:50:22.000Z
|
from .theme import ThemeManager
| 16
| 31
| 0.84375
| 4
| 32
| 6.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fc52e144812958fd47d18fcda49e62314066b23d
| 1,587
|
py
|
Python
|
python/easy/1470_Shuffle_the_Array.py
|
JackWang0107/leetcode
|
c02932190b639ef87a8d0fcd07d9cd6ec7344a67
|
[
"MIT"
] | 1
|
2021-05-22T03:27:33.000Z
|
2021-05-22T03:27:33.000Z
|
python/easy/1470_Shuffle_the_Array.py
|
JackWang0107/leetcode
|
c02932190b639ef87a8d0fcd07d9cd6ec7344a67
|
[
"MIT"
] | null | null | null |
python/easy/1470_Shuffle_the_Array.py
|
JackWang0107/leetcode
|
c02932190b639ef87a8d0fcd07d9cd6ec7344a67
|
[
"MIT"
] | null | null | null |
from typing import *


class Solution:
    # 60 ms, faster than 61.39% of Python3 online submissions for Shuffle the Array.
    # 14.2 MB, less than 98.76% of Python3 online submissions for Shuffle the Array.
    def shuffle(self, nums: List[int], n: int) -> List[int]:
        ans = []
        for i in range(n):
            ans.append(nums[i])
            ans.append(nums[n + i])
        return ans

    # 56 ms, faster than 84.56% of Python3 online submissions for Shuffle the Array.
    # 14.2 MB, less than 98.76% of Python3 online submissions for Shuffle the Array.
    def shuffle(self, nums: List[int], n: int) -> List[int]:
        ans = []
        for x, y in zip(nums[:n], nums[n:]):
            ans.append(x)
            ans.append(y)
        return ans

    # 60 ms, faster than 61.39% of Python3 online submissions for Shuffle the Array.
    # 14.5 MB, less than 49.20% of Python3 online submissions for Shuffle the Array.
    def shuffle(self, nums: List[int], n: int) -> List[int]:
        left = nums[:n]
        right = nums[n:]
        ans = [ None ]* (n *2)
        ans[::2] = left
        ans[1::2] = right
        return ans

    # 60 ms, faster than 61.39% of Python3 online submissions for Shuffle the Array.
    # 14.5 MB, less than 49.20% of Python3 online submissions for Shuffle the Array.
    def shuffle(self, nums: List[int], n: int) -> List[int]:
        ans = [ None ] * ( n* 2)
        ans[::2] = nums[:n]
        ans[1::2] = nums[n:]
        return ans


if __name__ == "__main__":
    so = Solution()
    print(so.shuffle(nums = [2,5,1,3,4,7], n = 3))
| 36.906977
| 85
| 0.57719
| 247
| 1,587
| 3.676113
| 0.230769
| 0.079295
| 0.132159
| 0.229075
| 0.742291
| 0.742291
| 0.71696
| 0.71696
| 0.71696
| 0.71696
| 0
| 0.066845
| 0.293006
| 1,587
| 43
| 86
| 36.906977
| 0.742424
| 0.398236
| 0
| 0.413793
| 0
| 0
| 0.008448
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.137931
| false
| 0
| 0.034483
| 0
| 0.344828
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fca6b4abd9a3d599a91d273c5c04d4c4a0463e16
| 164
|
py
|
Python
|
Tensorflow/my_tensorflow/src/regularizers/__init__.py
|
hywel1994/mac-workspace
|
10c20555104ce6ebba77657c7605ce2b7fa2fc34
|
[
"MIT"
] | 37
|
2018-10-18T06:19:08.000Z
|
2022-03-20T08:13:08.000Z
|
github/Algorithm_Interview_Notes-Chinese/_codes/my_tensorflow/src/regularizers/__init__.py
|
keeya/Interview
|
2b40108d39de9ca9e9ab069edb9d4fcf9fe5760c
|
[
"MIT"
] | 1
|
2019-01-27T14:21:21.000Z
|
2019-01-27T14:21:21.000Z
|
github/Algorithm_Interview_Notes-Chinese/_codes/my_tensorflow/src/regularizers/__init__.py
|
keeya/Interview
|
2b40108d39de9ca9e9ab069edb9d4fcf9fe5760c
|
[
"MIT"
] | 13
|
2018-10-23T12:39:55.000Z
|
2022-02-25T10:54:01.000Z
|
"""
正则化函数
`Tensor -> Tensor or None`
Examples:
l2_regularizer = l2(0.01)
tf.get_variable(..., regularizer=l2_regularizer, ...)
"""
from .L1L2 import *
| 16.4
| 57
| 0.628049
| 20
| 164
| 5
| 0.75
| 0.26
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 0.195122
| 164
| 9
| 58
| 18.222222
| 0.69697
| 0.823171
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
5dccee1d5537a67eb75dd0175f90ce68d50a316c
| 8,572
|
py
|
Python
|
tasks/libs/version_tests.py
|
Taiki-San/datadog-agent
|
40aa7e6d8a663dfda52ee5d35179fccc15e7ff51
|
[
"Apache-2.0"
] | 1,611
|
2017-09-28T15:07:39.000Z
|
2022-03-31T18:23:05.000Z
|
tasks/libs/version_tests.py
|
Taiki-San/datadog-agent
|
40aa7e6d8a663dfda52ee5d35179fccc15e7ff51
|
[
"Apache-2.0"
] | 3,946
|
2017-09-28T14:45:19.000Z
|
2022-03-31T20:19:36.000Z
|
tasks/libs/version_tests.py
|
Taiki-San/datadog-agent
|
40aa7e6d8a663dfda52ee5d35179fccc15e7ff51
|
[
"Apache-2.0"
] | 916
|
2017-10-17T23:18:48.000Z
|
2022-03-30T09:26:14.000Z
|
import random
import unittest
from .version import Version
class TestVersionComparison(unittest.TestCase):
def _get_version(self, major, minor, patch, rc, devel):
return Version(major, minor, patch=patch, rc=rc, devel=devel)
def _get_random_version(self):
return self._get_version(
random.randint(0, 99),
random.randint(0, 99),
random.randint(0, 99),
# For tests, rc must be non-0, as 0 signifies a release version, which would
# break some tests like test_rc_higher and test_rc_lower
random.randint(1, 99),
random.choice([True, False]),
)
def test_major_lower(self):
version = self._get_random_version()
increment = random.randint(1, 99)
self.assertFalse(
self._get_version(version.major, version.minor, version.patch, version.rc, version.devel)
> self._get_version(version.major + increment, version.minor, version.patch, version.rc, version.devel)
)
def test_major_higher(self):
version = self._get_random_version()
increment = random.randint(1, 99)
self.assertTrue(
self._get_version(version.major + increment, version.minor, version.patch, version.rc, version.devel)
> self._get_version(version.major, version.minor, version.patch, version.rc, version.devel)
)
def test_minor_lower(self):
version = self._get_random_version()
increment = random.randint(1, 99)
self.assertFalse(
self._get_version(version.major, version.minor, version.patch, version.rc, version.devel)
> self._get_version(version.major, version.minor + increment, version.patch, version.rc, version.devel)
)
def test_minor_higher(self):
version = self._get_random_version()
increment = random.randint(1, 99)
self.assertTrue(
self._get_version(version.major, version.minor + increment, version.patch, version.rc, version.devel)
> self._get_version(version.major, version.minor, version.patch, version.rc, version.devel)
)
def test_patch_lower(self):
version = self._get_random_version()
increment = random.randint(1, 99)
self.assertFalse(
self._get_version(version.major, version.minor, version.patch, version.rc, version.devel)
> self._get_version(version.major, version.minor, version.patch + increment, version.rc, version.devel)
)
def test_patch_higher(self):
version = self._get_random_version()
increment = random.randint(1, 99)
self.assertTrue(
self._get_version(version.major, version.minor, version.patch + increment, version.rc, version.devel)
> self._get_version(version.major, version.minor, version.patch, version.rc, version.devel)
)
def test_rc_lower_than_release(self):
version = self._get_random_version()
self.assertFalse(
self._get_version(version.major, version.minor, version.patch, version.rc, version.devel)
> self._get_version(version.major, version.minor, version.patch, None, version.devel)
)
def test_release_higher_than_rc(self):
version = self._get_random_version()
self.assertTrue(
self._get_version(version.major, version.minor, version.patch, None, version.devel)
> self._get_version(version.major, version.minor, version.patch, version.rc, version.devel)
)
def test_rc_lower(self):
version = self._get_random_version()
increment = random.randint(1, 99)
self.assertFalse(
self._get_version(version.major, version.minor, version.patch, version.rc, version.devel)
> self._get_version(version.major, version.minor, version.patch, version.rc + increment, version.devel)
)
def test_rc_higher(self):
version = self._get_random_version()
increment = random.randint(1, 99)
self.assertTrue(
self._get_version(version.major, version.minor, version.patch, version.rc + increment, version.devel)
> self._get_version(version.major, version.minor, version.patch, version.rc, version.devel)
)
def test_equal(self):
version = self._get_random_version()
self.assertFalse(
self._get_version(version.major, version.minor, version.patch, version.rc, version.devel)
> self._get_version(version.major, version.minor, version.patch, version.rc, version.devel)
)
def test_absent_patch_equal_zero(self):
version = self._get_random_version()
self.assertFalse(
self._get_version(version.major, version.minor, None, None, version.devel)
> self._get_version(version.major, version.minor, 0, None, version.devel)
)
def test_absent_patch_less_than_any(self):
version = self._get_random_version()
increment = random.randint(1, 99)
self.assertTrue(
self._get_version(version.major, version.minor, version.patch + increment, None, version.devel)
> self._get_version(version.major, version.minor, None, None, version.devel)
)
def test_devel_less_than_any(self):
version = self._get_random_version()
self.assertTrue(
self._get_version(version.major, version.minor, version.patch, None, False)
> self._get_version(version.major, version.minor, version.patch, None, True)
)
def test_devel_less_than_rc(self):
version = self._get_random_version()
self.assertTrue(
self._get_version(version.major, version.minor, version.patch, version.rc, False)
> self._get_version(version.major, version.minor, version.patch, None, True)
)
def test_devel_equal(self):
version = self._get_random_version()
self.assertTrue(
self._get_version(version.major, version.minor, version.patch, None, True)
== self._get_version(version.major, version.minor, version.patch, None, True)
)
class TestNonDevelVersion(unittest.TestCase):
version = Version(major=1, minor=0, devel=True)
def test_non_devel_version(self):
new_version = self.version.non_devel_version()
expected_version = Version(major=1, minor=0) # 1.0.0
self.assertEqual(new_version, expected_version)
class TestNextVersion(unittest.TestCase):
version = Version(major=1, minor=0)
def test_next_version_major(self):
new_version = self.version.next_version(bump_major=True)
expected_version = Version(major=2, minor=0)
self.assertEqual(new_version, expected_version)
def test_next_version_minor(self):
new_version = self.version.next_version(bump_minor=True)
expected_version = Version(major=1, minor=1)
self.assertEqual(new_version, expected_version)
def test_next_version_patch(self):
new_version = self.version.next_version(bump_patch=True)
expected_version = Version(major=1, minor=0, patch=1)
self.assertEqual(new_version, expected_version)
def test_next_version_major_rc(self):
new_version = self.version.next_version(bump_major=True, rc=True)
expected_version = Version(major=2, minor=0, rc=1)
self.assertEqual(new_version, expected_version)
def test_next_version_minor_rc(self):
new_version = self.version.next_version(bump_minor=True, rc=True)
expected_version = Version(major=1, minor=1, rc=1)
self.assertEqual(new_version, expected_version)
def test_next_version_patch_rc(self):
new_version = self.version.next_version(bump_patch=True, rc=True)
expected_version = Version(major=1, minor=0, patch=1, rc=1)
self.assertEqual(new_version, expected_version)
def test_next_version_rc(self):
version = self.version.next_version(bump_patch=True, rc=True) # 1.0.1-rc.1
new_version = version.next_version(rc=True)
expected_version = Version(major=1, minor=0, patch=1, rc=2)
self.assertEqual(new_version, expected_version)
def test_next_version_promote_rc(self):
version = self.version.next_version(bump_patch=True, rc=True) # 1.0.1-rc.1
new_version = version.next_version(rc=False)
expected_version = Version(major=1, minor=0, patch=1)
self.assertEqual(new_version, expected_version)
if __name__ == '__main__':
unittest.main()
| 40.819048
| 115
| 0.673705
| 1,072
| 8,572
| 5.142724
| 0.059701
| 0.062217
| 0.148195
| 0.121894
| 0.902594
| 0.88645
| 0.879013
| 0.861963
| 0.817159
| 0.801923
| 0
| 0.012425
| 0.220719
| 8,572
| 209
| 116
| 41.014354
| 0.812874
| 0.018315
| 0
| 0.435583
| 0
| 0
| 0.000951
| 0
| 0
| 0
| 0
| 0
| 0.153374
| 1
| 0.165644
| false
| 0
| 0.018405
| 0.01227
| 0.226994
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f8df4a44bfc676df0719491c5c618876d5b9eadf
| 35,038
|
py
|
Python
|
src/test/python/org/o3project/odenos/core/component/network/flow/ofpflow/test_ofp_flow_match.py
|
o3project/odenos
|
837d0d3d3c37482e843c40c5eeeac10646e68c65
|
[
"Apache-2.0"
] | 26
|
2015-02-18T10:22:50.000Z
|
2020-06-18T05:07:54.000Z
|
src/test/python/org/o3project/odenos/core/component/network/flow/ofpflow/test_ofp_flow_match.py
|
o3project/odenos
|
837d0d3d3c37482e843c40c5eeeac10646e68c65
|
[
"Apache-2.0"
] | 45
|
2015-02-20T00:40:45.000Z
|
2021-12-14T21:07:57.000Z
|
src/test/python/org/o3project/odenos/core/component/network/flow/ofpflow/test_ofp_flow_match.py
|
o3project/odenos
|
837d0d3d3c37482e843c40c5eeeac10646e68c65
|
[
"Apache-2.0"
] | 30
|
2015-02-19T02:00:35.000Z
|
2017-02-18T15:28:09.000Z
|
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from org.o3project.odenos.core.component.network.flow.ofpflow.ofp_flow_match\
import OFPFlowMatch
import unittest
class OFPFlowMatchTest(unittest.TestCase):
def setUp(self):
self.target = OFPFlowMatch("OFPFlowMatch", "ofp_in_node",
"ofp_in_port")
self.target.in_phy_port = "in_phy_port"
self.target.metadata = 11
self.target.metadata_mask = 12
self.target.eth_src = "eth_src"
self.target.eth_src_mask = "eth_src_mask"
self.target.eth_dst = "eth_dst"
self.target.eth_dst_mask = "eth_dst_mask"
self.target.vlan_vid = 13
self.target.vlan_vid_mask = 14
self.target.vlan_pcp = 15
self.target.eth_type = 16
self.target.ip_dscp = 21
self.target.ip_ecn = 22
self.target.ip_proto = 23
self.target.ipv4_src = "ipv4_src"
self.target.ipv4_src_mask = "ipv4_src_mask"
self.target.ipv4_dst = "ipv4_dst"
self.target.ipv4_dst_mask = "ipv4_dst_mask"
self.target.tcp_src = 31
self.target.tcp_dst = 32
self.target.udp_src = 33
self.target.udp_dst = 34
self.target.sctp_src = 35
self.target.sctp_dst = 36
self.target.icmpv4_type = 37
self.target.icmpv4_code = 38
self.target.arp_op = 40
self.target.arp_spa = "arp_spa"
self.target.arp_spa_mask = "arp_spa_mask"
self.target.arp_tpa = "arp_tpa"
self.target.arp_tpa_mask = "arp_tpa_mask"
self.target.arp_sha = "arp_sha"
self.target.arp_sha_mask = "arp_sha_mask"
self.target.arp_tha = "arp_tha"
self.target.arp_tha_mask = "arp_tha_mask"
self.target.ipv6_src = "ipv6_src"
self.target.ipv6_src_mask = "ipv6_src_mask"
self.target.ipv6_dst = "ipv6_dst"
self.target.ipv6_dst_mask = "ipv6_dst_mask"
self.target.ipv6_flabel = 50
self.target.ipv6_flabel_mask = 51
self.target.icmpv6_type = 52
self.target.icmpv6_code = 53
self.target.ipv6_nd_target = "ipv6_nd_target"
self.target.ipv6_nd_sll = "ipv6_nd_sll"
self.target.ipv6_nd_tll = "ipv6_nd_tll"
self.target.mpls_label = 54
self.target.mpls_tc = 55
self.target.mpls_bos = 56
self.target.pbb_isid = 57
self.target.pbb_isid_mask = 58
self.target.tunnel_id = 59
self.target.tunnel_id_mask = 60
self.target.ipv6_exthdr = 61
self.target.ipv6_exthdr_mask = 62
def tearDown(self):
self.target = None
def test_constractor_Not_None(self):
self.assertEqual(self.target._body[OFPFlowMatch.TYPE], "OFPFlowMatch")
self.assertEqual(self.target._body[OFPFlowMatch.IN_NODE], "ofp_in_node")
self.assertEqual(self.target._body[OFPFlowMatch.IN_PORT], "ofp_in_port")
self.assertEqual(self.target._body[OFPFlowMatch.IN_PHY_PORT], "in_phy_port")
self.assertEqual(self.target._body[OFPFlowMatch.METADATA], 11)
self.assertEqual(self.target._body[OFPFlowMatch.METADATA_MASK], 12)
self.assertEqual(self.target._body[OFPFlowMatch.ETH_SRC], "eth_src")
self.assertEqual(self.target._body[OFPFlowMatch.ETH_SRC_MASK], "eth_src_mask")
self.assertEqual(self.target._body[OFPFlowMatch.ETH_DST], "eth_dst")
self.assertEqual(self.target._body[OFPFlowMatch.ETH_DST_MASK], "eth_dst_mask")
self.assertEqual(self.target._body[OFPFlowMatch.VLAN_VID], 13)
self.assertEqual(self.target._body[OFPFlowMatch.VLAN_VID_MASK], 14)
self.assertEqual(self.target._body[OFPFlowMatch.VLAN_PCP], 15)
self.assertEqual(self.target._body[OFPFlowMatch.ETH_TYPE], 16)
self.assertEqual(self.target._body[OFPFlowMatch.IP_DSCP], 21)
self.assertEqual(self.target._body[OFPFlowMatch.IP_ECN], 22)
self.assertEqual(self.target._body[OFPFlowMatch.IP_PROTO], 23)
self.assertEqual(self.target._body[OFPFlowMatch.IPV4_SRC], "ipv4_src")
self.assertEqual(self.target._body[OFPFlowMatch.IPV4_SRC_MASK], "ipv4_src_mask")
self.assertEqual(self.target._body[OFPFlowMatch.IPV4_DST], "ipv4_dst")
self.assertEqual(self.target._body[OFPFlowMatch.IPV4_DST_MASK], "ipv4_dst_mask")
self.assertEqual(self.target._body[OFPFlowMatch.TCP_SRC], 31)
self.assertEqual(self.target._body[OFPFlowMatch.TCP_DST], 32)
self.assertEqual(self.target._body[OFPFlowMatch.UDP_SRC], 33)
self.assertEqual(self.target._body[OFPFlowMatch.UDP_DST], 34)
self.assertEqual(self.target._body[OFPFlowMatch.SCTP_SRC], 35)
self.assertEqual(self.target._body[OFPFlowMatch.SCTP_DST], 36)
self.assertEqual(self.target._body[OFPFlowMatch.ICMPV4_TYPE], 37)
self.assertEqual(self.target._body[OFPFlowMatch.ICMPV4_CODE], 38)
self.assertEqual(self.target._body[OFPFlowMatch.ARP_OP], 40)
self.assertEqual(self.target._body[OFPFlowMatch.ARP_SPA], "arp_spa")
self.assertEqual(self.target._body[OFPFlowMatch.ARP_SPA_MASK], "arp_spa_mask")
self.assertEqual(self.target._body[OFPFlowMatch.ARP_TPA], "arp_tpa")
self.assertEqual(self.target._body[OFPFlowMatch.ARP_TPA_MASK], "arp_tpa_mask")
self.assertEqual(self.target._body[OFPFlowMatch.ARP_SHA], "arp_sha")
self.assertEqual(self.target._body[OFPFlowMatch.ARP_SHA_MASK], "arp_sha_mask")
self.assertEqual(self.target._body[OFPFlowMatch.ARP_THA], "arp_tha")
self.assertEqual(self.target._body[OFPFlowMatch.ARP_THA_MASK], "arp_tha_mask")
self.assertEqual(self.target._body[OFPFlowMatch.IPV6_SRC], "ipv6_src")
self.assertEqual(self.target._body[OFPFlowMatch.IPV6_SRC_MASK], "ipv6_src_mask")
self.assertEqual(self.target._body[OFPFlowMatch.IPV6_DST], "ipv6_dst")
self.assertEqual(self.target._body[OFPFlowMatch.IPV6_DST_MASK], "ipv6_dst_mask")
self.assertEqual(self.target._body[OFPFlowMatch.IPV6_FLABEL], 50)
self.assertEqual(self.target._body[OFPFlowMatch.IPV6_FLABEL_MASK], 51)
self.assertEqual(self.target._body[OFPFlowMatch.ICMPV6_TYPE], 52)
self.assertEqual(self.target._body[OFPFlowMatch.ICMPV6_CODE], 53)
self.assertEqual(self.target._body[OFPFlowMatch.IPV6_ND_TARGET], "ipv6_nd_target")
self.assertEqual(self.target._body[OFPFlowMatch.IPV6_ND_SLL], "ipv6_nd_sll")
self.assertEqual(self.target._body[OFPFlowMatch.IPV6_ND_TLL], "ipv6_nd_tll")
self.assertEqual(self.target._body[OFPFlowMatch.MPLS_LABEL], 54)
self.assertEqual(self.target._body[OFPFlowMatch.MPLS_TC], 55)
self.assertEqual(self.target._body[OFPFlowMatch.MPLS_BOS], 56)
self.assertEqual(self.target._body[OFPFlowMatch.PBB_ISID], 57)
self.assertEqual(self.target._body[OFPFlowMatch.PBB_ISID_MASK], 58)
self.assertEqual(self.target._body[OFPFlowMatch.TUNNEL_ID], 59)
self.assertEqual(self.target._body[OFPFlowMatch.TUNNEL_ID_MASK], 60)
self.assertEqual(self.target._body[OFPFlowMatch.IPV6_EXTHDR], 61)
self.assertEqual(self.target._body[OFPFlowMatch.IPV6_EXTHDR_MASK], 62)
def test_constractor_None(self):
self.target = OFPFlowMatch("OFPFlowMatch", "ofp_in_node",
"ofp_in_port")
self.assertEqual(self.target._body, {"in_node": "ofp_in_node",
"type": "OFPFlowMatch",
"in_port": "ofp_in_port"})
# IN_PHY_PORT
def test_in_phy_port(self):
self.assertEqual(self.target.in_phy_port, "in_phy_port")
def test_in_phy_port_None(self):
del self.target._body[OFPFlowMatch.IN_PHY_PORT]
self.assertEqual(self.target.in_phy_port, None)
# METADATA
def test_metadata(self):
self.assertEqual(self.target.metadata, 11)
def test_metadata_None(self):
del self.target._body[OFPFlowMatch.METADATA]
self.assertEqual(self.target.metadata, None)
# METADATA_MASK
def test_metadata_mask(self):
self.assertEqual(self.target.metadata_mask, 12)
def test_metadata_mask_None(self):
del self.target._body[OFPFlowMatch.METADATA_MASK]
self.assertEqual(self.target.metadata_mask, None)
# ETH_SRC
def test_eth_src(self):
self.assertEqual(self.target.eth_src, "eth_src")
def test_eth_src_None(self):
del self.target._body[OFPFlowMatch.ETH_SRC]
self.assertEqual(self.target.eth_src, None)
# ETH_SRC_MASK
def test_eth_src_mask(self):
self.assertEqual(self.target.eth_src_mask, "eth_src_mask")
def test_eth_src_mask_None(self):
del self.target._body[OFPFlowMatch.ETH_SRC_MASK]
self.assertEqual(self.target.eth_src_mask, None)
# ETH_DST
def test_dl_dst(self):
self.assertEqual(self.target.eth_dst, "eth_dst")
def test_dl_dst_None(self):
del self.target._body[OFPFlowMatch.ETH_DST]
self.assertEqual(self.target.eth_dst, None)
# ETH_DST_MASK
def test_eth_dst_mask(self):
self.assertEqual(self.target.eth_dst_mask, "eth_dst_mask")
def test_eth_dst_mask_None(self):
del self.target._body[OFPFlowMatch.ETH_DST_MASK]
self.assertEqual(self.target.eth_dst_mask, None)
# VLAN_VID
def test_vlan_vid(self):
self.assertEqual(self.target.vlan_vid, 13)
def test_vlan_vid_None(self):
del self.target._body[OFPFlowMatch.VLAN_VID]
self.assertEqual(self.target.vlan_vid, None)
# VLAN_VID_MASK
def test_vlan_vid_mask(self):
self.assertEqual(self.target.vlan_vid_mask, 14)
def test_vlan_vid_mask_None(self):
del self.target._body[OFPFlowMatch.VLAN_VID_MASK]
self.assertEqual(self.target.vlan_vid_mask, None)
# VLAN_PCP
def test_vlan_pcp(self):
self.assertEqual(self.target.vlan_pcp, 15)
def test_vlan_pcp_None(self):
del self.target._body[OFPFlowMatch.VLAN_PCP]
self.assertEqual(self.target.vlan_pcp, None)
# ETH_TYPE
def test_eth_type(self):
self.assertEqual(self.target.eth_type, 16)
def test_eth_type_None(self):
del self.target._body[OFPFlowMatch.ETH_TYPE]
self.assertEqual(self.target.eth_type, None)
# IP_DSCP
def test_ip_dscp(self):
self.assertEqual(self.target.ip_dscp, 21)
def test_ip_dscp_None(self):
del self.target._body[OFPFlowMatch.IP_DSCP]
self.assertEqual(self.target.ip_dscp, None)
# IP_ECN
def test_ip_ecn(self):
self.assertEqual(self.target.ip_ecn, 22)
def test_ip_ecn_None(self):
del self.target._body[OFPFlowMatch.IP_ECN]
self.assertEqual(self.target.ip_ecn, None)
# IP_PROTO
def test_ip_proto(self):
self.assertEqual(self.target.ip_proto, 23)
def test_ip_proto_None(self):
del self.target._body[OFPFlowMatch.IP_PROTO]
self.assertEqual(self.target.ip_proto, None)
# IPV4_SRC
def test_ipv4_src(self):
self.assertEqual(self.target.ipv4_src, "ipv4_src")
def test_ipv4_src_None(self):
del self.target._body[OFPFlowMatch.IPV4_SRC]
self.assertEqual(self.target.ipv4_src, None)
# IPV4_SRC_MASK
def test_ipv4_src_mask(self):
self.assertEqual(self.target.ipv4_src_mask, "ipv4_src_mask")
def test_ipv4_src_mask_None(self):
del self.target._body[OFPFlowMatch.IPV4_SRC_MASK]
self.assertEqual(self.target.ipv4_src_mask, None)
# IPV4_DST
def test_ipv4_dst_mask(self):
self.assertEqual(self.target.ipv4_dst, "ipv4_dst")
def test_ipv4_dst_None(self):
del self.target._body[OFPFlowMatch.IPV4_DST]
self.assertEqual(self.target.ipv4_dst, None)
# IPV4_DST_MASK
def test_ipv4_dst_mask_mask(self):
self.assertEqual(self.target.ipv4_dst_mask, "ipv4_dst_mask")
def test_ipv4_dst_mask_None(self):
del self.target._body[OFPFlowMatch.IPV4_DST_MASK]
self.assertEqual(self.target.ipv4_dst_mask, None)
# TCP_SRC
def test_tcp_src(self):
self.assertEqual(self.target.tcp_src, 31)
def test_tcp_src_None(self):
del self.target._body[OFPFlowMatch.TCP_SRC]
self.assertEqual(self.target.tcp_src, None)
# TCP_DST
def test_tcp_dst(self):
self.assertEqual(self.target.tcp_dst, 32)
def test_tcp_dst_None(self):
del self.target._body[OFPFlowMatch.TCP_DST]
self.assertEqual(self.target.tcp_dst, None)
# UDP_SRC
def test_udp_src(self):
self.assertEqual(self.target.udp_src, 33)
def test_udp_src_None(self):
del self.target._body[OFPFlowMatch.UDP_SRC]
self.assertEqual(self.target.udp_src, None)
# UDP_DST
def test_udp_dst(self):
self.assertEqual(self.target.udp_dst, 34)
def test_udp_dst_None(self):
del self.target._body[OFPFlowMatch.UDP_DST]
self.assertEqual(self.target.udp_dst, None)
# SCTP_SRC
def test_sctp_src(self):
self.assertEqual(self.target.sctp_src, 35)
def test_sctp_src_None(self):
del self.target._body[OFPFlowMatch.SCTP_SRC]
self.assertEqual(self.target.sctp_src, None)
# SCTP_DST
def test_sctp_dst(self):
self.assertEqual(self.target.sctp_dst, 36)
def test_sctp_dst_None(self):
del self.target._body[OFPFlowMatch.SCTP_DST]
self.assertEqual(self.target.sctp_dst, None)
# ICMPV4_TYPE
def test_icmpv4_type(self):
self.assertEqual(self.target.icmpv4_type, 37)
def test_icmpv4_type_None(self):
del self.target._body[OFPFlowMatch.ICMPV4_TYPE]
self.assertEqual(self.target.icmpv4_type, None)
# ICMPV4_CODE
def test_icmpv4_code(self):
self.assertEqual(self.target.icmpv4_code, 38)
def test_icmpv4_code_None(self):
del self.target._body[OFPFlowMatch.ICMPV4_CODE]
self.assertEqual(self.target.icmpv4_code, None)
# ARP_OP
def test_arp_op(self):
self.assertEqual(self.target.arp_op, 40)
def test_arp_op_None(self):
del self.target._body[OFPFlowMatch.ARP_OP]
self.assertEqual(self.target.arp_op, None)
# ARP_SPA
def test_arp_spa(self):
self.assertEqual(self.target.arp_spa, "arp_spa")
def test_arp_spa_None(self):
del self.target._body[OFPFlowMatch.ARP_SPA]
self.assertEqual(self.target.arp_spa, None)
# ARP_SPA_MASK
def test_arp_spa_mask(self):
self.assertEqual(self.target.arp_spa_mask, "arp_spa_mask")
def test_arp_spa_mask_None(self):
del self.target._body[OFPFlowMatch.ARP_SPA_MASK]
self.assertEqual(self.target.arp_spa_mask, None)
# ARP_TPA
def test_arp_tpa(self):
self.assertEqual(self.target.arp_tpa, "arp_tpa")
def test_arp_tpa_None(self):
del self.target._body[OFPFlowMatch.ARP_TPA]
self.assertEqual(self.target.arp_tpa, None)
# ARP_TPA_MASK
def test_arp_tpa_mask(self):
self.assertEqual(self.target.arp_tpa_mask, "arp_tpa_mask")
def test_arp_tpa_mask_None(self):
del self.target._body[OFPFlowMatch.ARP_TPA_MASK]
self.assertEqual(self.target.arp_tpa_mask, None)
# ARP_SHA
def test_arp_sha(self):
self.assertEqual(self.target.arp_sha, "arp_sha")
def test_arp_sha_None(self):
del self.target._body[OFPFlowMatch.ARP_SHA]
self.assertEqual(self.target.arp_sha, None)
# ARP_SHA_MASK
def test_arp_sha_mask(self):
self.assertEqual(self.target.arp_sha_mask, "arp_sha_mask")
def test_arp_sha_mask_None(self):
del self.target._body[OFPFlowMatch.ARP_SHA_MASK]
self.assertEqual(self.target.arp_sha_mask, None)
# ARP_THA
def test_arp_tha(self):
self.assertEqual(self.target.arp_tha, "arp_tha")
def test_arp_tha_None(self):
del self.target._body[OFPFlowMatch.ARP_THA]
self.assertEqual(self.target.arp_tha, None)
# ARP_THA_MASK
def test_arp_tha_mask(self):
self.assertEqual(self.target.arp_tha_mask, "arp_tha_mask")
def test_arp_tha_mask_None(self):
del self.target._body[OFPFlowMatch.ARP_THA_MASK]
self.assertEqual(self.target.arp_tha_mask, None)
# IPV6_SRC
def test_ipv6_src(self):
self.assertEqual(self.target.ipv6_src, "ipv6_src")
def test_ipv6_src_None(self):
del self.target._body[OFPFlowMatch.IPV6_SRC]
self.assertEqual(self.target.ipv6_src, None)
# IPV6_SRC_MASK
def test_ipv6_src_mask(self):
self.assertEqual(self.target.ipv6_src_mask, "ipv6_src_mask")
def test_ipv6_src_mask_None(self):
del self.target._body[OFPFlowMatch.IPV6_SRC_MASK]
self.assertEqual(self.target.ipv6_src_mask, None)
# IPV6_DST
def test_ipv6_dst(self):
self.assertEqual(self.target.ipv6_dst, "ipv6_dst")
def test_ipv6_dst_None(self):
del self.target._body[OFPFlowMatch.IPV6_DST]
self.assertEqual(self.target.ipv6_dst, None)
# IPV6_DST_MASK
def test_ipv6_dst_mask(self):
self.assertEqual(self.target.ipv6_dst_mask, "ipv6_dst_mask")
def test_ipv6_dst_mask_None(self):
del self.target._body[OFPFlowMatch.IPV6_DST_MASK]
self.assertEqual(self.target.ipv6_dst_mask, None)
# IPV6_FLABEL
def test_ipv6_flabel(self):
self.assertEqual(self.target.ipv6_flabel, 50)
def test_ipv6_flabel_None(self):
del self.target._body[OFPFlowMatch.IPV6_FLABEL]
self.assertEqual(self.target.ipv6_flabel, None)
# IPV6_FLABEL_MASK
def test_ipv6_flabel_mask(self):
self.assertEqual(self.target.ipv6_flabel_mask, 51)
def test_ipv6_flabel_mask_None(self):
del self.target._body[OFPFlowMatch.IPV6_FLABEL_MASK]
self.assertEqual(self.target.ipv6_flabel_mask, None)
# ICMPV6_TYPE
def test_icmpv6_type(self):
self.assertEqual(self.target.icmpv6_type, 52)
def test_icmpv6_type_None(self):
del self.target._body[OFPFlowMatch.ICMPV6_TYPE]
self.assertEqual(self.target.icmpv6_type, None)
# ICMPV6_CODE
def test_icmpv6_code(self):
self.assertEqual(self.target.icmpv6_code, 53)
def test_icmpv6_code_None(self):
del self.target._body[OFPFlowMatch.ICMPV6_CODE]
self.assertEqual(self.target.icmpv6_code, None)
# IPV6_ND_TARGET
def test_ipv6_nd_target(self):
self.assertEqual(self.target.ipv6_nd_target, "ipv6_nd_target")
def test_ipv6_nd_target_None(self):
del self.target._body[OFPFlowMatch.IPV6_ND_TARGET]
self.assertEqual(self.target.ipv6_nd_target, None)
# IPV6_ND_SLL
def test_ipv6_nd_sll(self):
self.assertEqual(self.target.ipv6_nd_sll, "ipv6_nd_sll")
def test_ipv6_nd_sll_None(self):
del self.target._body[OFPFlowMatch.IPV6_ND_SLL]
self.assertEqual(self.target.ipv6_nd_sll, None)
# IPV6_ND_TLL
def test_ipv6_nd_tll(self):
self.assertEqual(self.target.ipv6_nd_tll, "ipv6_nd_tll")
def test_ipv6_nd_tll_None(self):
del self.target._body[OFPFlowMatch.IPV6_ND_TLL]
self.assertEqual(self.target.ipv6_nd_tll, None)
# MPLS_LABEL
def test_mpls_label(self):
self.assertEqual(self.target.mpls_label, 54)
def test_mpls_label_None(self):
del self.target._body[OFPFlowMatch.MPLS_LABEL]
self.assertEqual(self.target.mpls_label, None)
# MPLS_TC
def test_mpls_tc(self):
self.assertEqual(self.target.mpls_tc, 55)
def test_mpls_tc_None(self):
del self.target._body[OFPFlowMatch.MPLS_TC]
self.assertEqual(self.target.mpls_tc, None)
# MPLS_BOS
def test_mpls_bos(self):
self.assertEqual(self.target.mpls_bos, 56)
def test_mpls_bos_None(self):
del self.target._body[OFPFlowMatch.MPLS_BOS]
self.assertEqual(self.target.mpls_bos, None)
# PBB_ISID
def test_pbb_isid(self):
self.assertEqual(self.target.pbb_isid, 57)
def test_pbb_isid_None(self):
del self.target._body[OFPFlowMatch.PBB_ISID]
self.assertEqual(self.target.pbb_isid, None)
# PBB_ISID_MASK
def test_pbb_isid_mask(self):
self.assertEqual(self.target.pbb_isid_mask, 58)
def test_pbb_isid_mask_None(self):
del self.target._body[OFPFlowMatch.PBB_ISID_MASK]
self.assertEqual(self.target.pbb_isid_mask, None)
# TUNNEL_ID
def test_tunnel_id(self):
self.assertEqual(self.target.tunnel_id, 59)
def test_tunnel_id_None(self):
del self.target._body[OFPFlowMatch.TUNNEL_ID]
self.assertEqual(self.target.tunnel_id, None)
# TUNNEL_ID_MASK
def test_tunnel_id_mask(self):
self.assertEqual(self.target.tunnel_id_mask, 60)
def test_tunnel_id_mask_None(self):
del self.target._body[OFPFlowMatch.TUNNEL_ID_MASK]
self.assertEqual(self.target.tunnel_id_mask, None)
# IPV6_EXTHDR
def test_ipv6_exthdr(self):
self.assertEqual(self.target.ipv6_exthdr, 61)
def test_ipv6_exthdr_None(self):
del self.target._body[OFPFlowMatch.IPV6_EXTHDR]
self.assertEqual(self.target.ipv6_exthdr, None)
# IPV6_EXTHDR_MASK
def test_ipv6_exthdr_mask(self):
self.assertEqual(self.target.ipv6_exthdr_mask, 62)
def test_ipv6_exthdr_mask_None(self):
del self.target._body[OFPFlowMatch.IPV6_EXTHDR_MASK]
self.assertEqual(self.target.ipv6_exthdr_mask, None)
def test_create_from_packed_Not_None(self):
self.value = {
OFPFlowMatch.TYPE: "OFPFlowMatch",
OFPFlowMatch.IN_NODE: "ofp_in_node",
OFPFlowMatch.IN_PORT: "ofp_in_port",
OFPFlowMatch.IN_PHY_PORT: "in_phy_port",
OFPFlowMatch.METADATA: 11,
OFPFlowMatch.METADATA_MASK: 12,
OFPFlowMatch.ETH_SRC: "eth_src",
OFPFlowMatch.ETH_SRC_MASK: "eth_src_mask",
OFPFlowMatch.ETH_DST: "eth_dst",
OFPFlowMatch.ETH_DST_MASK: "eth_dst_mask",
OFPFlowMatch.VLAN_VID: 13,
OFPFlowMatch.VLAN_VID_MASK: 14,
OFPFlowMatch.VLAN_PCP: 15,
OFPFlowMatch.ETH_TYPE: 16,
OFPFlowMatch.IP_DSCP: 21,
OFPFlowMatch.IP_ECN: 22,
OFPFlowMatch.IP_PROTO: 23,
OFPFlowMatch.IPV4_SRC: "ipv4_src",
OFPFlowMatch.IPV4_SRC_MASK: "ipv4_src_mask",
OFPFlowMatch.IPV4_DST: "ipv4_dst",
OFPFlowMatch.IPV4_DST_MASK: "ipv4_dst_mask",
OFPFlowMatch.TCP_SRC: 31,
OFPFlowMatch.TCP_DST: 32,
OFPFlowMatch.UDP_SRC: 33,
OFPFlowMatch.UDP_DST: 34,
OFPFlowMatch.SCTP_SRC: 35,
OFPFlowMatch.SCTP_DST: 36,
OFPFlowMatch.ICMPV4_TYPE: 37,
OFPFlowMatch.ICMPV4_CODE: 38,
OFPFlowMatch.ARP_OP: 40,
OFPFlowMatch.ARP_SPA: "arp_spa",
OFPFlowMatch.ARP_SPA_MASK: "arp_spa_mask",
OFPFlowMatch.ARP_TPA: "arp_tpa",
OFPFlowMatch.ARP_TPA_MASK: "arp_tpa_mask",
OFPFlowMatch.ARP_SHA: "arp_sha",
OFPFlowMatch.ARP_SHA_MASK: "arp_sha_mask",
OFPFlowMatch.ARP_THA: "arp_tha",
OFPFlowMatch.ARP_THA_MASK: "arp_tha_mask",
OFPFlowMatch.IPV6_SRC: "ipv6_src",
OFPFlowMatch.IPV6_SRC_MASK: "ipv6_src_mask",
OFPFlowMatch.IPV6_DST: "ipv6_dst",
OFPFlowMatch.IPV6_DST_MASK: "ipv6_dst_mask",
OFPFlowMatch.IPV6_FLABEL: 50,
OFPFlowMatch.IPV6_FLABEL_MASK: 51,
OFPFlowMatch.ICMPV6_TYPE: 52,
OFPFlowMatch.ICMPV6_CODE: 53,
OFPFlowMatch.IPV6_ND_TARGET: "ipv6_nd_target",
OFPFlowMatch.IPV6_ND_SLL: "ipv6_nd_sll",
OFPFlowMatch.IPV6_ND_TLL: "ipv6_nd_tll",
OFPFlowMatch.MPLS_LABEL: 54,
OFPFlowMatch.MPLS_TC: 55,
OFPFlowMatch.MPLS_BOS: 56,
OFPFlowMatch.PBB_ISID: 57,
OFPFlowMatch.PBB_ISID_MASK: 58,
OFPFlowMatch.TUNNEL_ID: 59,
OFPFlowMatch.TUNNEL_ID_MASK: 60,
OFPFlowMatch.IPV6_EXTHDR: 61,
OFPFlowMatch.IPV6_EXTHDR_MASK: 62
}
self.result = OFPFlowMatch.create_from_packed(self.value)
self.assertEqual(self.result._body[OFPFlowMatch.TYPE], "OFPFlowMatch")
self.assertEqual(self.result._body[OFPFlowMatch.IN_NODE], "ofp_in_node")
self.assertEqual(self.result._body[OFPFlowMatch.IN_PORT], "ofp_in_port")
self.assertEqual(self.result._body[OFPFlowMatch.IN_PHY_PORT], "in_phy_port")
self.assertEqual(self.result._body[OFPFlowMatch.METADATA], 11)
self.assertEqual(self.result._body[OFPFlowMatch.METADATA_MASK], 12)
self.assertEqual(self.result._body[OFPFlowMatch.ETH_SRC], "eth_src")
self.assertEqual(self.result._body[OFPFlowMatch.ETH_SRC_MASK], "eth_src_mask")
self.assertEqual(self.result._body[OFPFlowMatch.ETH_DST], "eth_dst")
self.assertEqual(self.result._body[OFPFlowMatch.ETH_DST_MASK], "eth_dst_mask")
self.assertEqual(self.result._body[OFPFlowMatch.VLAN_VID], 13)
self.assertEqual(self.result._body[OFPFlowMatch.VLAN_VID_MASK], 14)
self.assertEqual(self.result._body[OFPFlowMatch.VLAN_PCP], 15)
self.assertEqual(self.result._body[OFPFlowMatch.ETH_TYPE], 16)
self.assertEqual(self.result._body[OFPFlowMatch.IP_DSCP], 21)
self.assertEqual(self.result._body[OFPFlowMatch.IP_ECN], 22)
self.assertEqual(self.result._body[OFPFlowMatch.IP_PROTO], 23)
self.assertEqual(self.result._body[OFPFlowMatch.IPV4_SRC], "ipv4_src")
self.assertEqual(self.result._body[OFPFlowMatch.IPV4_SRC_MASK], "ipv4_src_mask")
self.assertEqual(self.result._body[OFPFlowMatch.IPV4_DST], "ipv4_dst")
self.assertEqual(self.result._body[OFPFlowMatch.IPV4_DST_MASK], "ipv4_dst_mask")
self.assertEqual(self.result._body[OFPFlowMatch.TCP_SRC], 31)
self.assertEqual(self.result._body[OFPFlowMatch.TCP_DST], 32)
self.assertEqual(self.result._body[OFPFlowMatch.UDP_SRC], 33)
self.assertEqual(self.result._body[OFPFlowMatch.UDP_DST], 34)
self.assertEqual(self.result._body[OFPFlowMatch.SCTP_SRC], 35)
self.assertEqual(self.result._body[OFPFlowMatch.SCTP_DST], 36)
self.assertEqual(self.result._body[OFPFlowMatch.ICMPV4_TYPE], 37)
self.assertEqual(self.result._body[OFPFlowMatch.ICMPV4_CODE], 38)
self.assertEqual(self.result._body[OFPFlowMatch.ARP_OP], 40)
self.assertEqual(self.result._body[OFPFlowMatch.ARP_SPA], "arp_spa")
self.assertEqual(self.result._body[OFPFlowMatch.ARP_SPA_MASK], "arp_spa_mask")
self.assertEqual(self.result._body[OFPFlowMatch.ARP_TPA], "arp_tpa")
self.assertEqual(self.result._body[OFPFlowMatch.ARP_TPA_MASK], "arp_tpa_mask")
self.assertEqual(self.result._body[OFPFlowMatch.ARP_SHA], "arp_sha")
self.assertEqual(self.result._body[OFPFlowMatch.ARP_SHA_MASK], "arp_sha_mask")
self.assertEqual(self.result._body[OFPFlowMatch.ARP_THA], "arp_tha")
self.assertEqual(self.result._body[OFPFlowMatch.ARP_THA_MASK], "arp_tha_mask")
self.assertEqual(self.result._body[OFPFlowMatch.IPV6_SRC], "ipv6_src")
self.assertEqual(self.result._body[OFPFlowMatch.IPV6_SRC_MASK], "ipv6_src_mask")
self.assertEqual(self.result._body[OFPFlowMatch.IPV6_DST], "ipv6_dst")
self.assertEqual(self.result._body[OFPFlowMatch.IPV6_DST_MASK], "ipv6_dst_mask")
self.assertEqual(self.result._body[OFPFlowMatch.IPV6_FLABEL], 50)
self.assertEqual(self.result._body[OFPFlowMatch.IPV6_FLABEL_MASK], 51)
self.assertEqual(self.result._body[OFPFlowMatch.ICMPV6_TYPE], 52)
self.assertEqual(self.result._body[OFPFlowMatch.ICMPV6_CODE], 53)
self.assertEqual(self.result._body[OFPFlowMatch.IPV6_ND_TARGET], "ipv6_nd_target")
self.assertEqual(self.result._body[OFPFlowMatch.IPV6_ND_SLL], "ipv6_nd_sll")
self.assertEqual(self.result._body[OFPFlowMatch.IPV6_ND_TLL], "ipv6_nd_tll")
self.assertEqual(self.result._body[OFPFlowMatch.MPLS_LABEL], 54)
self.assertEqual(self.result._body[OFPFlowMatch.MPLS_TC], 55)
self.assertEqual(self.result._body[OFPFlowMatch.MPLS_BOS], 56)
self.assertEqual(self.result._body[OFPFlowMatch.PBB_ISID], 57)
self.assertEqual(self.result._body[OFPFlowMatch.PBB_ISID_MASK], 58)
self.assertEqual(self.result._body[OFPFlowMatch.TUNNEL_ID], 59)
self.assertEqual(self.result._body[OFPFlowMatch.TUNNEL_ID_MASK], 60)
self.assertEqual(self.result._body[OFPFlowMatch.IPV6_EXTHDR], 61)
self.assertEqual(self.result._body[OFPFlowMatch.IPV6_EXTHDR_MASK], 62)
def test_create_from_packed_None(self):
self.value = {OFPFlowMatch.TYPE: "OFPFlowMatch",
OFPFlowMatch.IN_NODE: "0456",
OFPFlowMatch.IN_PORT: "0789"}
self.result = OFPFlowMatch.create_from_packed(self.value)
self.assertEqual(self.result._body, {OFPFlowMatch.TYPE: "OFPFlowMatch",
OFPFlowMatch.IN_NODE: "0456",
OFPFlowMatch.IN_PORT: "0789"})
def test_packed_object(self):
self.result = self.target.packed_object()
self.assertEqual(self.result[OFPFlowMatch.TYPE], "OFPFlowMatch")
self.assertEqual(self.result[OFPFlowMatch.IN_NODE], "ofp_in_node")
self.assertEqual(self.result[OFPFlowMatch.IN_PORT], "ofp_in_port")
self.assertEqual(self.result[OFPFlowMatch.IN_PHY_PORT], "in_phy_port")
self.assertEqual(self.result[OFPFlowMatch.METADATA], 11)
self.assertEqual(self.result[OFPFlowMatch.METADATA_MASK], 12)
self.assertEqual(self.result[OFPFlowMatch.ETH_SRC], "eth_src")
self.assertEqual(self.result[OFPFlowMatch.ETH_SRC_MASK], "eth_src_mask")
self.assertEqual(self.result[OFPFlowMatch.ETH_DST], "eth_dst")
self.assertEqual(self.result[OFPFlowMatch.ETH_DST_MASK], "eth_dst_mask")
self.assertEqual(self.result[OFPFlowMatch.VLAN_VID], 13)
self.assertEqual(self.result[OFPFlowMatch.VLAN_VID_MASK], 14)
self.assertEqual(self.result[OFPFlowMatch.VLAN_PCP], 15)
self.assertEqual(self.result[OFPFlowMatch.ETH_TYPE], 16)
self.assertEqual(self.result[OFPFlowMatch.IP_DSCP], 21)
self.assertEqual(self.result[OFPFlowMatch.IP_ECN], 22)
self.assertEqual(self.result[OFPFlowMatch.IP_PROTO], 23)
self.assertEqual(self.result[OFPFlowMatch.IPV4_SRC], "ipv4_src")
self.assertEqual(self.result[OFPFlowMatch.IPV4_SRC_MASK], "ipv4_src_mask")
self.assertEqual(self.result[OFPFlowMatch.IPV4_DST], "ipv4_dst")
self.assertEqual(self.result[OFPFlowMatch.IPV4_DST_MASK], "ipv4_dst_mask")
self.assertEqual(self.result[OFPFlowMatch.TCP_SRC], 31)
self.assertEqual(self.result[OFPFlowMatch.TCP_DST], 32)
self.assertEqual(self.result[OFPFlowMatch.UDP_SRC], 33)
self.assertEqual(self.result[OFPFlowMatch.UDP_DST], 34)
self.assertEqual(self.result[OFPFlowMatch.SCTP_SRC], 35)
self.assertEqual(self.result[OFPFlowMatch.SCTP_DST], 36)
self.assertEqual(self.result[OFPFlowMatch.ICMPV4_TYPE], 37)
self.assertEqual(self.result[OFPFlowMatch.ICMPV4_CODE], 38)
self.assertEqual(self.result[OFPFlowMatch.ARP_OP], 40)
self.assertEqual(self.result[OFPFlowMatch.ARP_SPA], "arp_spa")
self.assertEqual(self.result[OFPFlowMatch.ARP_SPA_MASK], "arp_spa_mask")
self.assertEqual(self.result[OFPFlowMatch.ARP_TPA], "arp_tpa")
self.assertEqual(self.result[OFPFlowMatch.ARP_TPA_MASK], "arp_tpa_mask")
self.assertEqual(self.result[OFPFlowMatch.ARP_SHA], "arp_sha")
self.assertEqual(self.result[OFPFlowMatch.ARP_SHA_MASK], "arp_sha_mask")
self.assertEqual(self.result[OFPFlowMatch.ARP_THA], "arp_tha")
self.assertEqual(self.result[OFPFlowMatch.ARP_THA_MASK], "arp_tha_mask")
self.assertEqual(self.result[OFPFlowMatch.IPV6_SRC], "ipv6_src")
self.assertEqual(self.result[OFPFlowMatch.IPV6_SRC_MASK], "ipv6_src_mask")
self.assertEqual(self.result[OFPFlowMatch.IPV6_DST], "ipv6_dst")
self.assertEqual(self.result[OFPFlowMatch.IPV6_DST_MASK], "ipv6_dst_mask")
self.assertEqual(self.result[OFPFlowMatch.IPV6_FLABEL], 50)
self.assertEqual(self.result[OFPFlowMatch.IPV6_FLABEL_MASK], 51)
self.assertEqual(self.result[OFPFlowMatch.ICMPV6_TYPE], 52)
self.assertEqual(self.result[OFPFlowMatch.ICMPV6_CODE], 53)
self.assertEqual(self.result[OFPFlowMatch.IPV6_ND_TARGET], "ipv6_nd_target")
self.assertEqual(self.result[OFPFlowMatch.IPV6_ND_SLL], "ipv6_nd_sll")
self.assertEqual(self.result[OFPFlowMatch.IPV6_ND_TLL], "ipv6_nd_tll")
self.assertEqual(self.result[OFPFlowMatch.MPLS_LABEL], 54)
self.assertEqual(self.result[OFPFlowMatch.MPLS_TC], 55)
self.assertEqual(self.result[OFPFlowMatch.MPLS_BOS], 56)
self.assertEqual(self.result[OFPFlowMatch.PBB_ISID], 57)
self.assertEqual(self.result[OFPFlowMatch.PBB_ISID_MASK], 58)
self.assertEqual(self.result[OFPFlowMatch.TUNNEL_ID], 59)
self.assertEqual(self.result[OFPFlowMatch.TUNNEL_ID_MASK], 60)
self.assertEqual(self.result[OFPFlowMatch.IPV6_EXTHDR], 61)
self.assertEqual(self.result[OFPFlowMatch.IPV6_EXTHDR_MASK], 62)
if __name__ == '__main__':
unittest.main()
| 42.938725
| 90
| 0.680062
| 4,573
| 35,038
| 4.894817
| 0.038924
| 0.191655
| 0.242763
| 0.188751
| 0.888983
| 0.836356
| 0.711982
| 0.590198
| 0.312232
| 0.146533
| 0
| 0.024454
| 0.214539
| 35,038
| 815
| 91
| 42.991411
| 0.788888
| 0.045037
| 0
| 0.013468
| 0
| 0
| 0.05315
| 0
| 0
| 0
| 0
| 0
| 0.481481
| 1
| 0.19697
| false
| 0
| 0.003367
| 0
| 0.20202
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5d0f2bb186ffc7b9a92b6551237bf7d09fab5ce6
| 103
|
py
|
Python
|
blaze/compute/air/__init__.py
|
talumbau/blaze
|
66c9e61476f11d53f7b734664214537182397739
|
[
"BSD-3-Clause"
] | 1
|
2018-01-24T08:54:04.000Z
|
2018-01-24T08:54:04.000Z
|
blaze/compute/air/__init__.py
|
talumbau/blaze
|
66c9e61476f11d53f7b734664214537182397739
|
[
"BSD-3-Clause"
] | null | null | null |
blaze/compute/air/__init__.py
|
talumbau/blaze
|
66c9e61476f11d53f7b734664214537182397739
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, division, print_function
from .entrypoint import compile, run
| 25.75
| 64
| 0.84466
| 13
| 103
| 6.230769
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116505
| 103
| 3
| 65
| 34.333333
| 0.89011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
5d1bd9fe989869ca0c0660a2f03f1cd70651cc37
| 248
|
py
|
Python
|
core/src/zeit/content/text/browser/tests/test_doctest.py
|
rickdg/vivi
|
16134ac954bf8425646d4ad47bdd1f372e089355
|
[
"BSD-3-Clause"
] | 5
|
2019-05-16T09:51:29.000Z
|
2021-05-31T09:30:03.000Z
|
core/src/zeit/content/text/browser/tests/test_doctest.py
|
rickdg/vivi
|
16134ac954bf8425646d4ad47bdd1f372e089355
|
[
"BSD-3-Clause"
] | 107
|
2019-05-24T12:19:02.000Z
|
2022-03-23T15:05:56.000Z
|
core/src/zeit/content/text/browser/tests/test_doctest.py
|
rickdg/vivi
|
16134ac954bf8425646d4ad47bdd1f372e089355
|
[
"BSD-3-Clause"
] | 3
|
2020-08-14T11:01:17.000Z
|
2022-01-08T17:32:19.000Z
|
import zeit.cms.testing
import zeit.content.text.testing
def test_suite():
return zeit.cms.testing.FunctionalDocFileSuite(
'README.txt',
package='zeit.content.text.browser',
layer=zeit.content.text.testing.WSGI_LAYER)
| 24.8
| 51
| 0.717742
| 31
| 248
| 5.677419
| 0.548387
| 0.1875
| 0.255682
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.165323
| 248
| 9
| 52
| 27.555556
| 0.850242
| 0
| 0
| 0
| 0
| 0
| 0.141129
| 0.100806
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| true
| 0
| 0.285714
| 0.142857
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
5d543c23a7e260984b7ac5f5a9a21d89b2db2216
| 6,236
|
py
|
Python
|
simulation/behaviors.py
|
tmsquill/grove
|
afe2d6ff35a2db7d8cb1e3ff7e5a0e3b29f369f7
|
[
"MIT"
] | 1
|
2016-11-23T16:59:01.000Z
|
2016-11-23T16:59:01.000Z
|
simulation/behaviors.py
|
zivia/grove
|
afe2d6ff35a2db7d8cb1e3ff7e5a0e3b29f369f7
|
[
"MIT"
] | 1
|
2020-04-26T19:28:42.000Z
|
2020-04-26T19:28:42.000Z
|
simulation/behaviors.py
|
zivia/grove
|
afe2d6ff35a2db7d8cb1e3ff7e5a0e3b29f369f7
|
[
"MIT"
] | null | null | null |
import entity
from utils import Point, rand
def move_north(agent=None, entities=None, environment=None):
"""
Behavior that causes an agent to move north one unit.
:param agent: The agent to perform the behavior.
:param entities: A list of entities in the simulation.
:param environment: The environment containing the agent.
:return: The updated agent.
"""
if environment.body.contains_point(Point(agent.body.top_left.position[0], agent.body.top_left.position[1] + 1)):
for item in agent.inventory:
item.body.top_left.position[1] += 1
item.body.bottom_right.position[1] += 1
agent.body.top_left.position[1] += 1
agent.body.bottom_right.position[1] += 1
agent.time += 1
return agent
def move_east(agent=None, entities=None, environment=None):
"""
Behavior that causes an agent to move east one unit.
:param agent: The agent to perform the behavior.
:param entities: A list of entities in the simulation.
:param environment: The environment containing the agent.
:return: The updated agent.
"""
if environment.body.contains_point(Point(agent.body.bottom_right.position[0] + 1, agent.body.bottom_right.position[1])):
for item in agent.inventory:
item.body.top_left.position[0] += 1
item.body.bottom_right.position[0] += 1
agent.body.top_left.position[0] += 1
agent.body.bottom_right.position[0] += 1
agent.time += 1
return agent
def move_south(agent=None, entities=None, environment=None):
"""
Behavior that causes an agent to move south one unit.
:param agent: The agent to perform the behavior.
:param entities: A list of entities in the simulation.
:param environment: The environment containing the agent.
:return: The updated agent.
"""
if environment.body.contains_point(Point(agent.body.bottom_right.position[0], agent.body.bottom_right.position[1] - 1)):
for item in agent.inventory:
item.body.top_left.position[1] -= 1
item.body.bottom_right.position[1] -= 1
agent.body.top_left.position[1] -= 1
agent.body.bottom_right.position[1] -= 1
agent.time += 1
return agent
def move_west(agent=None, entities=None, environment=None):
"""
Behavior that causes an agent to move west one unit.
:param agent: The agent to perform the behavior.
:param entities: A list of entities in the simulation.
:param environment: The environment containing the agent.
:return: The updated agent.
"""
if environment.body.contains_point(Point(agent.body.top_left.position[0] - 1, agent.body.top_left.position[1])):
for item in agent.inventory:
item.body.top_left.position[0] -= 1
item.body.bottom_right.position[0] -= 1
agent.body.top_left.position[0] -= 1
agent.body.bottom_right.position[0] -= 1
agent.time += 1
return agent
def pickup_food(agent=None, entities=None, environment=None):
"""
Behavior that causes an agent to pickup food.
:param agent: The agent to perform the behavior.
:param entities: A list of entities in the simulation.
:param environment: The environment containing the agent.
:return: The updated agent.
"""
foods = filter(lambda x: isinstance(x, entity.Food), entities)
if not agent.holding_food:
for food in foods:
if agent.body.contains_rectangle(food.body) and food.interactable:
agent.inventory.append(food)
food.interactable = False
agent.holding_food = True
break
agent.time += 1
return agent
def drop_food(agent=None, entities=None, environment=None):
"""
Behavior that causes an agent to drop food.
:param agent: The agent to perform the behavior.
:param entities: A list of entities in the simulation.
:param environment: The environment containing the agent.
:return: The updated agent.
"""
    # filter() is a lazy iterator on Python 3 and cannot be indexed, so take the first match with next().
    nest = next(e for e in entities if isinstance(e, entity.Nest))
if agent.holding_food:
        # Iterate over a copy so items can be removed from the inventory safely.
        for item in list(agent.inventory):
agent.inventory.remove(item)
if nest.body.contains_rectangle(agent.body):
nest.food_count += 1
else:
item.interactable = True
agent.holding_food = False
agent.time += 1
return agent
def random_walk(agent=None, entities=None, environment=None):
"""
Behavior that causes an agent to walk in a random direction for one time step.
:param agent: The agent to perform the behavior.
:param entities: A list of entities in the simulation.
:param environment: The environment containing the agent.
:return: The updated agent.
"""
random_direction = rand.randint(0, 3)
if random_direction == 0:
return move_north(agent, entities, environment)
elif random_direction == 1:
return move_east(agent, entities, environment)
elif random_direction == 2:
return move_south(agent, entities, environment)
elif random_direction == 3:
return move_west(agent, entities, environment)
return agent
def return_home(agent=None, entities=None, environment=None):
"""
Behavior (naive) that causes an agent to return to the nest.
:param agent: The agent to perform the behavior.
:param entities: A list of entities in the simulation.
:param environment: The environment containing the agent.
:return: The updated agent.
"""
    # filter() is a lazy iterator on Python 3 and cannot be indexed, so take the first match with next().
    nest = next(e for e in entities if isinstance(e, entity.Nest))
agent.time += int(agent.body.top_left.distance_to(nest.body.top_left))
agent.body.top_left.position[1] = (nest.body.top_left.position[1] + nest.body.bottom_right.position[1]) / 2
agent.body.bottom_right.position[0] = (nest.body.top_left.position[0] + nest.body.bottom_right.position[0]) / 2
agent.body.bottom_right.position[1] = (nest.body.top_left.position[1] + nest.body.bottom_right.position[1]) / 2
agent.body.top_left.position[0] = (nest.body.top_left.position[0] + nest.body.bottom_right.position[0]) / 2
return agent
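# --- Illustrative sketch (editor's addition, not part of behaviors.py) -----------
# The nest lookups above need the first entity of a given type; on Python 3,
# filter() returns a lazy iterator that cannot be indexed, so the first match is
# taken with next(). Minimal, self-contained demonstration using a hypothetical
# stand-in for entity.Nest.
class _Nest(object):
    pass
_entities = [object(), _Nest(), object()]
# Python 2 style (raises TypeError on Python 3):
#     filter(lambda x: isinstance(x, _Nest), _entities)[0]
_nest = next(e for e in _entities if isinstance(e, _Nest))
assert isinstance(_nest, _Nest)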
| 28.605505
| 124
| 0.669179
| 854
| 6,236
| 4.807963
| 0.094848
| 0.050414
| 0.05358
| 0.083293
| 0.830005
| 0.819776
| 0.760594
| 0.736727
| 0.728933
| 0.726498
| 0
| 0.015618
| 0.229955
| 6,236
| 217
| 125
| 28.737327
| 0.839442
| 0.314785
| 0
| 0.2625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.025
| 0
| 0.275
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
538c39a701a8f10c699e5e979d44542d758f1f16
| 73
|
py
|
Python
|
src/cranet/metrics/__init__.py
|
shizuku/cranet
|
4c86ad16029ed76a74e22b5e5e4c21267d6b9996
|
[
"MIT"
] | 4
|
2021-10-31T13:31:13.000Z
|
2021-12-11T08:45:36.000Z
|
src/cranet/metrics/__init__.py
|
Azathoth1729/cranet
|
4c86ad16029ed76a74e22b5e5e4c21267d6b9996
|
[
"MIT"
] | null | null | null |
src/cranet/metrics/__init__.py
|
Azathoth1729/cranet
|
4c86ad16029ed76a74e22b5e5e4c21267d6b9996
|
[
"MIT"
] | 2
|
2021-10-31T13:34:28.000Z
|
2021-11-21T09:11:46.000Z
|
from .classification import *
from .common import *
from .image import *
| 18.25
| 29
| 0.753425
| 9
| 73
| 6.111111
| 0.555556
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164384
| 73
| 3
| 30
| 24.333333
| 0.901639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
53993a1bb828fc6a921c953e078dfc8940f4a60d
| 184
|
py
|
Python
|
umetnine/account/admin.py
|
jaanos/OPB-umetnine
|
f1fedd62e750317548510c412793d80c60b9e392
|
[
"MIT"
] | null | null | null |
umetnine/account/admin.py
|
jaanos/OPB-umetnine
|
f1fedd62e750317548510c412793d80c60b9e392
|
[
"MIT"
] | null | null | null |
umetnine/account/admin.py
|
jaanos/OPB-umetnine
|
f1fedd62e750317548510c412793d80c60b9e392
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import UserArtwork
# from .models import User
# Register your models here.
# admin.site.register(User)
admin.site.register(UserArtwork)
| 20.444444
| 32
| 0.793478
| 25
| 184
| 5.84
| 0.48
| 0.136986
| 0.219178
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 184
| 8
| 33
| 23
| 0.906832
| 0.418478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
53aa7ff3e92639633021ae82763cdc6ae5481066
| 134
|
py
|
Python
|
coffin/conf/urls.py
|
spothero/coffin
|
9ea6a9173cbfed592c5b4776c489dba8d9280d52
|
[
"BSD-3-Clause"
] | null | null | null |
coffin/conf/urls.py
|
spothero/coffin
|
9ea6a9173cbfed592c5b4776c489dba8d9280d52
|
[
"BSD-3-Clause"
] | null | null | null |
coffin/conf/urls.py
|
spothero/coffin
|
9ea6a9173cbfed592c5b4776c489dba8d9280d52
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf.urls import *
handler404 = 'coffin.views.defaults.page_not_found'
handler500 = 'coffin.views.defaults.server_error'
| 26.8
| 51
| 0.80597
| 18
| 134
| 5.833333
| 0.833333
| 0.209524
| 0.361905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04878
| 0.08209
| 134
| 4
| 52
| 33.5
| 0.804878
| 0
| 0
| 0
| 0
| 0
| 0.522388
| 0.522388
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
53d41097a07696ea306c9d943df32e690a1a8ed1
| 72
|
py
|
Python
|
pycocotools/loss.py
|
mintanwei/IPCLs-Net
|
04937df683216a090c0749cc90ab7e517dbab0fd
|
[
"MIT"
] | null | null | null |
pycocotools/loss.py
|
mintanwei/IPCLs-Net
|
04937df683216a090c0749cc90ab7e517dbab0fd
|
[
"MIT"
] | null | null | null |
pycocotools/loss.py
|
mintanwei/IPCLs-Net
|
04937df683216a090c0749cc90ab7e517dbab0fd
|
[
"MIT"
] | null | null | null |
from torch import nn
import torch
from torch.nn import functional as F
| 14.4
| 36
| 0.805556
| 13
| 72
| 4.461538
| 0.538462
| 0.310345
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180556
| 72
| 4
| 37
| 18
| 0.983051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
53d915ed8952fb531f3395dc69a8724c25682022
| 47
|
py
|
Python
|
urls/__init__.py
|
AileenLumina/dwarf
|
5fc3b1b532290a474d17f84694dae1d0d53be7b4
|
[
"MIT"
] | null | null | null |
urls/__init__.py
|
AileenLumina/dwarf
|
5fc3b1b532290a474d17f84694dae1d0d53be7b4
|
[
"MIT"
] | null | null | null |
urls/__init__.py
|
AileenLumina/dwarf
|
5fc3b1b532290a474d17f84694dae1d0d53be7b4
|
[
"MIT"
] | null | null | null |
import importlib
# importlib.import_module()
| 9.4
| 27
| 0.787234
| 5
| 47
| 7.2
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12766
| 47
| 4
| 28
| 11.75
| 0.878049
| 0.531915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
53e82b8a1f5a8b1b5a831baf0275f818ebf4aae4
| 1,776
|
py
|
Python
|
4 - camera & ysort/code/settings.py
|
aldrinbrillante/Zelda
|
83d74beca1e1d352a17fc4218cf1e2226d5788c3
|
[
"CC0-1.0"
] | null | null | null |
4 - camera & ysort/code/settings.py
|
aldrinbrillante/Zelda
|
83d74beca1e1d352a17fc4218cf1e2226d5788c3
|
[
"CC0-1.0"
] | null | null | null |
4 - camera & ysort/code/settings.py
|
aldrinbrillante/Zelda
|
83d74beca1e1d352a17fc4218cf1e2226d5788c3
|
[
"CC0-1.0"
] | null | null | null |
# game setup
WIDTH = 1280
HEIGTH = 720
FPS = 60
TILESIZE = 64
WORLD_MAP = [
['x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x'],
['x',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ','x'],
['x',' ','p',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ','x'],
['x',' ',' ','x',' ',' ',' ',' ',' ','x','x','x','x','x',' ',' ',' ',' ',' ','x'],
['x',' ',' ','x',' ',' ',' ',' ',' ',' ',' ',' ',' ','x',' ',' ',' ',' ',' ','x'],
['x',' ',' ','x',' ',' ',' ',' ',' ',' ',' ',' ',' ','x',' ',' ',' ',' ',' ','x'],
['x',' ',' ','x',' ',' ',' ',' ',' ',' ',' ',' ',' ','x',' ',' ',' ',' ',' ','x'],
['x',' ',' ','x',' ',' ',' ',' ',' ',' ',' ',' ',' ','x',' ',' ',' ',' ',' ','x'],
['x',' ',' ','x',' ',' ',' ',' ',' ',' ',' ',' ',' ','x',' ',' ',' ',' ',' ','x'],
['x',' ',' ','x',' ',' ',' ',' ',' ',' ',' ',' ',' ','x',' ',' ',' ',' ',' ','x'],
['x',' ',' ','x',' ',' ',' ',' ',' ',' ',' ',' ',' ','x',' ',' ',' ',' ',' ','x'],
['x',' ',' ','x',' ',' ',' ',' ',' ',' ',' ',' ',' ','x','x','x',' ',' ',' ','x'],
['x',' ',' ',' ',' ',' ',' ','x',' ','x',' ',' ',' ',' ',' ',' ',' ',' ',' ','x'],
['x',' ',' ',' ',' ',' ','x','x','x','x','x',' ',' ',' ',' ',' ',' ',' ',' ','x'],
['x',' ',' ',' ',' ',' ',' ','x','x','x',' ',' ',' ',' ',' ',' ',' ',' ',' ','x'],
['x',' ',' ',' ',' ',' ',' ',' ','x',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ','x'],
['x',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ','x'],
['x',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ','x'],
['x',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ','x'],
['x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x','x'],
]
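# --- Illustrative sketch (editor's addition, not part of settings.py) ------------
# A typical way such a tile map is consumed: each cell index is scaled by TILESIZE
# to obtain a pixel position. The loop below is hypothetical and relies only on the
# constants defined above ('x' appears to mark an obstacle tile, 'p' the player spawn).
for _row_index, _row in enumerate(WORLD_MAP):
    for _col_index, _cell in enumerate(_row):
        _x = _col_index * TILESIZE
        _y = _row_index * TILESIZE
        if _cell == 'x':
            pass  # an obstacle sprite would be placed at pixel (_x, _y)
        elif _cell == 'p':
            pass  # the player would be spawned at pixel (_x, _y)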
| 63.428571
| 83
| 0.091779
| 124
| 1,776
| 1.306452
| 0.112903
| 1.345679
| 1.981481
| 2.592593
| 0.685185
| 0.685185
| 0.685185
| 0.685185
| 0.685185
| 0.685185
| 0
| 0.007796
| 0.205518
| 1,776
| 28
| 84
| 63.428571
| 0.107016
| 0.005631
| 0
| 0.5
| 0
| 0
| 0.23015
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
53ef02c674653b5749ae7c259d02425420920e68
| 29,616
|
py
|
Python
|
code/python/FactSetOwnership/v1/fds/sdk/FactSetOwnership/api/fund_holdings_api.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 6
|
2022-02-07T16:34:18.000Z
|
2022-03-30T08:04:57.000Z
|
code/python/FactSetOwnership/v1/fds/sdk/FactSetOwnership/api/fund_holdings_api.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 2
|
2022-02-07T05:25:57.000Z
|
2022-03-07T14:18:04.000Z
|
code/python/FactSetOwnership/v1/fds/sdk/FactSetOwnership/api/fund_holdings_api.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | null | null | null |
"""
FactSet Ownership API
    FactSet’s Fund Ownership API gives access to both **Holdings** and **Holders** data.<p> FactSet's Holdings endpoints give access to all the underlying securities and their position details held within a given fund. Fund Types supported include Open-End Mutual Funds, Closed-end Mutual Funds, and Exchange Traded Funds. Security Holders information retrieves all \"holder types\" and their positions across institutions, funds, insiders, and stakeholders.</p><p>The FactSet Ownership and Mutual Funds database collects global equity ownership data for approximately 50,000 institutions, 60,000 unique Mutual Fund portfolios, and 400,000 Insider/Stake holders from around 110 countries. For more details review our [Data Collection](https://my.apps.factset.com/oa/cms/oaAttachment/87e162be-f2d1-4f40-a85b-bfb1b020d270/20079) methodology. </p> # noqa: E501
The version of the OpenAPI document: 1.1.0
Contact: api@factset.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from multiprocessing.pool import ApplyResult
import typing
from fds.sdk.FactSetOwnership.api_client import ApiClient, Endpoint as _Endpoint
from fds.sdk.FactSetOwnership.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from fds.sdk.FactSetOwnership.exceptions import ApiException
from fds.sdk.FactSetOwnership.model.error_response import ErrorResponse
from fds.sdk.FactSetOwnership.model.fund_holdings_request import FundHoldingsRequest
from fds.sdk.FactSetOwnership.model.fund_holdings_response import FundHoldingsResponse
class FundHoldingsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.get_ownership_holdings_endpoint = _Endpoint(
settings={
'response_type': (
{ 200: (FundHoldingsResponse,), 400: (ErrorResponse,), 401: (ErrorResponse,), 403: (ErrorResponse,), 415: (ErrorResponse,), 500: (ErrorResponse,), },
None
),
'auth': [
'FactSetApiKey',
'FactSetOAuth2'
],
'endpoint_path': '/factset-ownership/v1/fund-holdings',
'operation_id': 'get_ownership_holdings',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'ids',
'date',
'topn',
'asset_type',
'currency',
],
'required': [
'ids',
],
'nullable': [
],
'enum': [
'asset_type',
],
'validation': [
'ids',
]
},
root_map={
'validations': {
('ids',): {
'max_items': 10,
'min_items': 1,
},
},
'allowed_values': {
('asset_type',): {
"ALL": "ALL",
"EQ": "EQ",
"FI": "FI"
},
},
'openapi_types': {
'ids':
([str],),
'date':
(str,),
'topn':
(str,),
'asset_type':
(str,),
'currency':
(str,),
},
'attribute_map': {
'ids': 'ids',
'date': 'date',
'topn': 'topn',
'asset_type': 'assetType',
'currency': 'currency',
},
'location_map': {
'ids': 'query',
'date': 'query',
'topn': 'query',
'asset_type': 'query',
'currency': 'query',
},
'collection_format_map': {
'ids': 'csv',
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.post_ownership_holdings_endpoint = _Endpoint(
settings={
'response_type': (
{ 200: (FundHoldingsResponse,), 400: (ErrorResponse,), 401: (ErrorResponse,), 403: (ErrorResponse,), 415: (ErrorResponse,), 500: (ErrorResponse,), },
None
),
'auth': [
'FactSetApiKey',
'FactSetOAuth2'
],
'endpoint_path': '/factset-ownership/v1/fund-holdings',
'operation_id': 'post_ownership_holdings',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'fund_holdings_request',
],
'required': [
'fund_holdings_request',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'fund_holdings_request':
(FundHoldingsRequest,),
},
'attribute_map': {
},
'location_map': {
'fund_holdings_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
@staticmethod
def apply_kwargs_defaults(kwargs, return_http_data_only, async_req):
kwargs["async_req"] = async_req
kwargs["_return_http_data_only"] = return_http_data_only
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False)
kwargs["_content_type"] = kwargs.get("_content_type")
kwargs["_host_index"] = kwargs.get("_host_index")
def get_ownership_holdings(
self,
ids,
**kwargs
) -> FundHoldingsResponse:
"""Get underlying holdings information for a requested fund identifer. # noqa: E501
Gets holdings information for list of fund identifiers. The service allows you to filter by the TopN holdings and Asset Type. # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Args:
ids ([str]): List of requested fund identifiers. <p>***ids limit** = 10 per request*</p>
Keyword Args:
date (str): Date of holdings expressed in YYYY-MM-DD format. The fund-holdings endpoint will default to latest month-end close.. [optional]
topn (str): Limits number of holdings or holders displayed by the top *n* securities based on positions Market Value. Default is ALL, otherwise use number to limit number.. [optional] if omitted the server will use the default value of "ALL"
asset_type (str): Filter holdings by the following major asset classes - * **EQ** = Equity * **FI** = Fixed Income * **ALL** = ALL . [optional] if omitted the server will use the default value of "EQ"
currency (str): Currency code for adjusting prices. Default is Local. For a list of currency ISO codes, visit [Online Assistant Page 1470](https://oa.apps.factset.com/pages/1470).. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
FundHoldingsResponse
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
kwargs['ids'] = \
ids
return self.get_ownership_holdings_endpoint.call_with_http_info(**kwargs)
def get_ownership_holdings_with_http_info(
self,
ids,
**kwargs
) -> typing.Tuple[FundHoldingsResponse, int, typing.MutableMapping]:
"""Get underlying holdings information for a requested fund identifer. # noqa: E501
Gets holdings information for list of fund identifiers. The service allows you to filter by the TopN holdings and Asset Type. # noqa: E501
This method makes a synchronous HTTP request. Returns http data, http status and headers
Args:
ids ([str]): List of requested fund identifiers. <p>***ids limit** = 10 per request*</p>
Keyword Args:
date (str): Date of holdings expressed in YYYY-MM-DD format. The fund-holdings endpoint will default to latest month-end close.. [optional]
topn (str): Limits number of holdings or holders displayed by the top *n* securities based on positions Market Value. Default is ALL, otherwise use number to limit number.. [optional] if omitted the server will use the default value of "ALL"
asset_type (str): Filter holdings by the following major asset classes - * **EQ** = Equity * **FI** = Fixed Income * **ALL** = ALL . [optional] if omitted the server will use the default value of "EQ"
currency (str): Currency code for adjusting prices. Default is Local. For a list of currency ISO codes, visit [Online Assistant Page 1470](https://oa.apps.factset.com/pages/1470).. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
FundHoldingsResponse
Response Object
int
Http Status Code
dict
Dictionary of the response headers
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
kwargs['ids'] = \
ids
return self.get_ownership_holdings_endpoint.call_with_http_info(**kwargs)
def get_ownership_holdings_async(
self,
ids,
**kwargs
) -> "ApplyResult[FundHoldingsResponse]":
"""Get underlying holdings information for a requested fund identifer. # noqa: E501
Gets holdings information for list of fund identifiers. The service allows you to filter by the TopN holdings and Asset Type. # noqa: E501
        This method makes an asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
Args:
ids ([str]): List of requested fund identifiers. <p>***ids limit** = 10 per request*</p>
Keyword Args:
date (str): Date of holdings expressed in YYYY-MM-DD format. The fund-holdings endpoint will default to latest month-end close.. [optional]
topn (str): Limits number of holdings or holders displayed by the top *n* securities based on positions Market Value. Default is ALL, otherwise use number to limit number.. [optional] if omitted the server will use the default value of "ALL"
asset_type (str): Filter holdings by the following major asset classes - * **EQ** = Equity * **FI** = Fixed Income * **ALL** = ALL . [optional] if omitted the server will use the default value of "EQ"
currency (str): Currency code for adjusting prices. Default is Local. For a list of currency ISO codes, visit [Online Assistant Page 1470](https://oa.apps.factset.com/pages/1470).. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[FundHoldingsResponse]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
kwargs['ids'] = \
ids
return self.get_ownership_holdings_endpoint.call_with_http_info(**kwargs)
def get_ownership_holdings_with_http_info_async(
self,
ids,
**kwargs
) -> "ApplyResult[typing.Tuple[FundHoldingsResponse, int, typing.MutableMapping]]":
"""Get underlying holdings information for a requested fund identifer. # noqa: E501
Gets holdings information for list of fund identifiers. The service allows you to filter by the TopN holdings and Asset Type. # noqa: E501
        This method makes an asynchronous HTTP request. Returns http data, http status and headers, wrapped in ApplyResult
Args:
ids ([str]): List of requested fund identifiers. <p>***ids limit** = 10 per request*</p>
Keyword Args:
date (str): Date of holdings expressed in YYYY-MM-DD format. The fund-holdings endpoint will default to latest month-end close.. [optional]
topn (str): Limits number of holdings or holders displayed by the top *n* securities based on positions Market Value. Default is ALL, otherwise use number to limit number.. [optional] if omitted the server will use the default value of "ALL"
asset_type (str): Filter holdings by the following major asset classes - * **EQ** = Equity * **FI** = Fixed Income * **ALL** = ALL . [optional] if omitted the server will use the default value of "EQ"
currency (str): Currency code for adjusting prices. Default is Local. For a list of currency ISO codes, visit [Online Assistant Page 1470](https://oa.apps.factset.com/pages/1470).. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[(FundHoldingsResponse, int, typing.Dict)]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
kwargs['ids'] = \
ids
return self.get_ownership_holdings_endpoint.call_with_http_info(**kwargs)
def post_ownership_holdings(
self,
fund_holdings_request,
**kwargs
) -> FundHoldingsResponse:
"""Get holdings for a list of funds. # noqa: E501
Gets Holding information for a long list of Fund objects. # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Args:
fund_holdings_request (FundHoldingsRequest): Requesting Underlying Holdings for a list of Fund Identifiers.
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
FundHoldingsResponse
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
kwargs['fund_holdings_request'] = \
fund_holdings_request
return self.post_ownership_holdings_endpoint.call_with_http_info(**kwargs)
def post_ownership_holdings_with_http_info(
self,
fund_holdings_request,
**kwargs
) -> typing.Tuple[FundHoldingsResponse, int, typing.MutableMapping]:
"""Get holdings for a list of funds. # noqa: E501
Gets Holding information for a long list of Fund objects. # noqa: E501
This method makes a synchronous HTTP request. Returns http data, http status and headers
Args:
fund_holdings_request (FundHoldingsRequest): Requesting Underlying Holdings for a list of Fund Identifiers.
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
FundHoldingsResponse
Response Object
int
Http Status Code
dict
Dictionary of the response headers
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
kwargs['fund_holdings_request'] = \
fund_holdings_request
return self.post_ownership_holdings_endpoint.call_with_http_info(**kwargs)
def post_ownership_holdings_async(
self,
fund_holdings_request,
**kwargs
) -> "ApplyResult[FundHoldingsResponse]":
"""Get holdings for a list of funds. # noqa: E501
Gets Holding information for a long list of Fund objects. # noqa: E501
        This method makes an asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
Args:
fund_holdings_request (FundHoldingsRequest): Requesting Underlying Holdings for a list of Fund Identifiers.
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[FundHoldingsResponse]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
kwargs['fund_holdings_request'] = \
fund_holdings_request
return self.post_ownership_holdings_endpoint.call_with_http_info(**kwargs)
def post_ownership_holdings_with_http_info_async(
self,
fund_holdings_request,
**kwargs
) -> "ApplyResult[typing.Tuple[FundHoldingsResponse, int, typing.MutableMapping]]":
"""Get holdings for a list of funds. # noqa: E501
Gets Holding information for a long list of Fund objects. # noqa: E501
        This method makes an asynchronous HTTP request. Returns http data, http status and headers, wrapped in ApplyResult
Args:
fund_holdings_request (FundHoldingsRequest): Requesting Underlying Holdings for a list of Fund Identifiers.
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[(FundHoldingsResponse, int, typing.Dict)]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
kwargs['fund_holdings_request'] = \
fund_holdings_request
return self.post_ownership_holdings_endpoint.call_with_http_info(**kwargs)
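# --- Illustrative usage sketch (editor's addition, not part of the generated module) ---
# Hedged example based only on the constructor and method signatures above; the fund
# identifier and keyword values are placeholders, and API-key/OAuth configuration on
# the ApiClient is omitted, so a real call additionally needs credentials.
if __name__ == "__main__":
    example_api = FundHoldingsApi()            # falls back to a default ApiClient()
    example_response = example_api.get_ownership_holdings(
        ids=["EXAMPLE-FUND-ID"],               # hypothetical identifier; max 10 ids per request
        topn="5",                              # top 5 holdings by market value
        asset_type="EQ",                       # allowed values: ALL, EQ, FI
    )
    print(example_response)                    # a FundHoldingsResponse instance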
| 49.942664
| 860
| 0.596738
| 3,382
| 29,616
| 5.09728
| 0.096984
| 0.029236
| 0.018099
| 0.017634
| 0.854574
| 0.837926
| 0.837462
| 0.822844
| 0.822147
| 0.820175
| 0
| 0.009983
| 0.333671
| 29,616
| 592
| 861
| 50.027027
| 0.863586
| 0.605416
| 0
| 0.570281
| 0
| 0
| 0.160252
| 0.063226
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040161
| false
| 0
| 0.040161
| 0
| 0.116466
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
54d8ce9fda6d8630409124fe8f41eb51b96ce584
| 620
|
py
|
Python
|
rcnn/lib/python3.6/site-packages/tensorflow/graph_util/__init__.py
|
dreamingweaver/making_passportImage
|
68f23411780ff82abe934dfae5fc04acb80f2c49
|
[
"MIT"
] | 1
|
2019-01-12T13:17:32.000Z
|
2019-01-12T13:17:32.000Z
|
rcnn/lib/python3.6/site-packages/tensorflow/graph_util/__init__.py
|
dreamingweaver/making_passportImage
|
68f23411780ff82abe934dfae5fc04acb80f2c49
|
[
"MIT"
] | null | null | null |
rcnn/lib/python3.6/site-packages/tensorflow/graph_util/__init__.py
|
dreamingweaver/making_passportImage
|
68f23411780ff82abe934dfae5fc04acb80f2c49
|
[
"MIT"
] | null | null | null |
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Helpers to manipulate a tensor graph in python.
"""
from __future__ import print_function
from tensorflow.python.framework.graph_util import convert_variables_to_constants
from tensorflow.python.framework.graph_util import extract_sub_graph
from tensorflow.python.framework.graph_util import must_run_on_cpu
from tensorflow.python.framework.graph_util import remove_training_nodes
from tensorflow.python.framework.graph_util import tensor_shape_from_node_def_name
del print_function
| 38.75
| 82
| 0.859677
| 91
| 620
| 5.549451
| 0.527473
| 0.190099
| 0.19802
| 0.287129
| 0.435644
| 0.435644
| 0.435644
| 0
| 0
| 0
| 0
| 0
| 0.085484
| 620
| 15
| 83
| 41.333333
| 0.890653
| 0.280645
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.857143
| 0
| 0.857143
| 0.285714
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
54fea51f6d1cb4c372a2272b257c00584e640961
| 123
|
py
|
Python
|
adventures/bucketlist/admin.py
|
lakivisi-zz/adventures
|
f094ac3fa1d5c85d65650c9cdc2ff2f60f9189a5
|
[
"MIT"
] | null | null | null |
adventures/bucketlist/admin.py
|
lakivisi-zz/adventures
|
f094ac3fa1d5c85d65650c9cdc2ff2f60f9189a5
|
[
"MIT"
] | null | null | null |
adventures/bucketlist/admin.py
|
lakivisi-zz/adventures
|
f094ac3fa1d5c85d65650c9cdc2ff2f60f9189a5
|
[
"MIT"
] | 1
|
2021-01-14T21:27:32.000Z
|
2021-01-14T21:27:32.000Z
|
from django.contrib import admin
from bucketlist.models import Bucketlist, Item
admin.site.register((Bucketlist, Item))
| 17.571429
| 46
| 0.804878
| 16
| 123
| 6.1875
| 0.625
| 0.282828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113821
| 123
| 6
| 47
| 20.5
| 0.908257
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
073308290954ca159881f4e1d0c513bbe9ea4754
| 64
|
py
|
Python
|
unittest/functions.test.py
|
mokpolar/devops-eng-training
|
2cf327a37e4575991f2846f42cad03f3cbab770d
|
[
"MIT"
] | null | null | null |
unittest/functions.test.py
|
mokpolar/devops-eng-training
|
2cf327a37e4575991f2846f42cad03f3cbab770d
|
[
"MIT"
] | 1
|
2021-05-17T07:43:26.000Z
|
2021-05-17T07:43:26.000Z
|
unittest/functions.test.py
|
mokpolar/devops-eng-training
|
2cf327a37e4575991f2846f42cad03f3cbab770d
|
[
"MIT"
] | 9
|
2021-05-06T06:00:18.000Z
|
2021-05-15T08:30:47.000Z
|
# TODO(everyone): Write test cases for the add, subtract, multiply, and divide functions
import pytest
| 21.333333
| 49
| 0.703125
| 12
| 64
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 64
| 3
| 50
| 21.333333
| 0.865385
| 0.734375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
07463e6b6117126e9dd1efba8bcdf70ce2d2e411
| 103
|
py
|
Python
|
soccer/exceptions.py
|
elimt/soccer-cli
|
400665812bbe2d29b8ead7627713f38fa688bd75
|
[
"MIT"
] | 1,185
|
2015-08-19T15:52:35.000Z
|
2022-03-27T19:28:36.000Z
|
soccer/exceptions.py
|
elimt/soccer-cli
|
400665812bbe2d29b8ead7627713f38fa688bd75
|
[
"MIT"
] | 129
|
2015-09-01T18:32:21.000Z
|
2022-02-13T06:35:38.000Z
|
soccer/exceptions.py
|
elimt/soccer-cli
|
400665812bbe2d29b8ead7627713f38fa688bd75
|
[
"MIT"
] | 312
|
2015-09-01T17:58:15.000Z
|
2022-03-27T19:29:55.000Z
|
class IncorrectParametersException(Exception):
pass
class APIErrorException(Exception):
pass
| 14.714286
| 46
| 0.786408
| 8
| 103
| 10.125
| 0.625
| 0.320988
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15534
| 103
| 6
| 47
| 17.166667
| 0.931034
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
075125aa69f3fb3a8390c035fd0dc11f2920d054
| 9,409
|
py
|
Python
|
papi_sdk/tests/mocked_data/overview.py
|
stanislav-losev/papi-sdk-python
|
4a296745d626ef13c6d1170e9d3569cb1c37eb3c
|
[
"MIT"
] | 1
|
2022-02-01T08:53:24.000Z
|
2022-02-01T08:53:24.000Z
|
papi_sdk/tests/mocked_data/overview.py
|
stanislav-losev/papi-sdk-python
|
4a296745d626ef13c6d1170e9d3569cb1c37eb3c
|
[
"MIT"
] | 2
|
2021-01-18T07:57:29.000Z
|
2021-06-23T11:04:14.000Z
|
papi_sdk/tests/mocked_data/overview.py
|
stanislav-losev/papi-sdk-python
|
4a296745d626ef13c6d1170e9d3569cb1c37eb3c
|
[
"MIT"
] | 3
|
2020-12-30T13:09:45.000Z
|
2020-12-30T13:42:33.000Z
|
overview_response = {
"debug": None,
"error": None,
"status": "ok",
"data": [
{
"endpoint": "api/b2b/v3/general/contract/data/info/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 100,
"seconds_number": 86400,
},
{
"endpoint": "api/b2b/v3/general/document/closing_documents/download/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 100,
"seconds_number": 86400,
},
{
"endpoint": "api/b2b/v3/general/document/closing_documents/info/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 100,
"seconds_number": 86400,
},
{
"endpoint": "api/b2b/v3/general/financial/info/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 100,
"seconds_number": 86400,
},
{
"endpoint": "api/b2b/v3/hotel/info/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/hotel/info/dump/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 100,
"seconds_number": 86400,
},
{
"endpoint": "api/b2b/v3/hotel/info/incremental_dump/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 100,
"seconds_number": 86400,
},
{
"endpoint": "api/b2b/v3/hotel/matching/dump/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 100,
"seconds_number": 86400,
},
{
"endpoint": "api/b2b/v3/hotel/order/booking/finish/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/hotel/order/booking/finish/status/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/hotel/order/booking/form/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/hotel/order/cancel/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/hotel/order/document/info_invoice/download/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/hotel/order/document/single_act/download/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/hotel/order/document/voucher/download/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/hotel/order/info/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/hotel/reviews/dump/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 100,
"seconds_number": 86400,
},
{
"endpoint": "api/b2b/v3/hotel/static/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 100,
"seconds_number": 86400,
},
{
"endpoint": "api/b2b/v3/search/serp/hotels/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/search/hp/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/search/multicomplete/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/ordergroup/order/add/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/ordergroup/create/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/ordergroup/disband/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/ordergroup/document/invoice/download/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/ordergroup/info/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/ordergroup/pay/overpay/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/ordergroup/order/remove/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/overview/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 100,
"seconds_number": 86400,
},
{
"endpoint": "api/b2b/v3/profiles/create/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/profiles/delete/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/profiles/disable/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/profiles/edit/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/profiles/list/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/profiles/restore/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/b2b/v3/search/serp/region/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
{
"endpoint": "api/affiliate/v3/overview/",
"is_active": True,
"is_debug_mode": False,
"is_limited": True,
"requests_number": 30,
"seconds_number": 60,
},
],
}
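# --- Illustrative sketch (editor's addition, not part of the mocked-data module) --
# Example of how a test might inspect the payload above; it uses only keys that
# appear in the data: every endpoint is limited per minute (60 s) or per day (86400 s).
daily_limited_endpoints = [
    entry["endpoint"]
    for entry in overview_response["data"]
    if entry["seconds_number"] == 86400
]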
| 30.950658
| 82
| 0.455628
| 862
| 9,409
| 4.708817
| 0.078886
| 0.100271
| 0.109387
| 0.127618
| 0.942597
| 0.934959
| 0.930525
| 0.930525
| 0.924612
| 0.924612
| 0
| 0.046591
| 0.404613
| 9,409
| 303
| 83
| 31.052805
| 0.677972
| 0
| 0
| 0.610561
| 0
| 0
| 0.405144
| 0.129344
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0759cebe101d132eb123673aae139f2fc2abee01
| 326
|
py
|
Python
|
tests/test_my_package.py
|
lucianojardim/pyskeleton
|
19b67f1e57c19bd882271be06b0605b6ba355db0
|
[
"MIT"
] | null | null | null |
tests/test_my_package.py
|
lucianojardim/pyskeleton
|
19b67f1e57c19bd882271be06b0605b6ba355db0
|
[
"MIT"
] | null | null | null |
tests/test_my_package.py
|
lucianojardim/pyskeleton
|
19b67f1e57c19bd882271be06b0605b6ba355db0
|
[
"MIT"
] | null | null | null |
""" tests for my_package """
from .context import my_package
def test_increment():
""" test increment """
assert my_package.increment(3) == 4
def test_decrement():
""" test decrement """
assert my_package.decrement(3) == 2
def test_double():
""" test double """
assert my_package.double(3) == 6
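# --- Illustrative sketch (editor's addition, not part of the test module) --------
# A hypothetical minimal implementation of the functions these tests exercise;
# the real my_package in the source repository is not shown here.
def increment(x):
    return x + 1
def decrement(x):
    return x - 1
def double(x):
    return x * 2
assert increment(3) == 4 and decrement(3) == 2 and double(3) == 6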
| 17.157895
| 39
| 0.634969
| 42
| 326
| 4.738095
| 0.404762
| 0.226131
| 0.226131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023438
| 0.214724
| 326
| 18
| 40
| 18.111111
| 0.753906
| 0.199387
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0.428571
| true
| 0
| 0.142857
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
ab059a3f68c0786e384bdc1f9523ecd6e3bc33aa
| 131
|
py
|
Python
|
ranking/admin.py
|
ShitamatsugeFactory/KokoWalk_Server
|
9af36f750a512aa56635b04a190589d76822bc86
|
[
"MIT"
] | 3
|
2017-01-01T07:34:54.000Z
|
2017-01-04T02:18:37.000Z
|
ranking/admin.py
|
ShitamatsugeFactory/KokoWalk_Server
|
9af36f750a512aa56635b04a190589d76822bc86
|
[
"MIT"
] | null | null | null |
ranking/admin.py
|
ShitamatsugeFactory/KokoWalk_Server
|
9af36f750a512aa56635b04a190589d76822bc86
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Ranking
@admin.register(Ranking)
class RankingAdmin(admin.ModelAdmin):  # distinct name avoids shadowing the imported Ranking model
pass
| 16.375
| 33
| 0.778626
| 18
| 131
| 5.722222
| 0.666667
| 0.23301
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137405
| 131
| 8
| 34
| 16.375
| 0.902655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.2
| 0.4
| null | null | 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
ab26df89f6ef8af4754cf6ce61455085d2b634e1
| 32
|
py
|
Python
|
dlex_impl/deep_sets/src/datasets/__init__.py
|
dvtrung/dl-torch
|
b49e57d10d32bb223e2d7643f2579ccc32c63a9a
|
[
"MIT"
] | null | null | null |
dlex_impl/deep_sets/src/datasets/__init__.py
|
dvtrung/dl-torch
|
b49e57d10d32bb223e2d7643f2579ccc32c63a9a
|
[
"MIT"
] | null | null | null |
dlex_impl/deep_sets/src/datasets/__init__.py
|
dvtrung/dl-torch
|
b49e57d10d32bb223e2d7643f2579ccc32c63a9a
|
[
"MIT"
] | null | null | null |
from .modelnet import ModelNet40
| 32
| 32
| 0.875
| 4
| 32
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 0.09375
| 32
| 1
| 32
| 32
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
db7354bd91d165215e863d1dc8f6118a73eba28f
| 35
|
py
|
Python
|
tests/unit/test_modulegraph/testdata/nspkg/src/parent/namedpkg/parent.py
|
hawkhai/pyinstaller
|
016a24479b34de161792c72dde455a81ad4c78ae
|
[
"Apache-2.0"
] | 9,267
|
2015-01-01T04:08:45.000Z
|
2022-03-31T11:42:38.000Z
|
tests/unit/test_modulegraph/testdata/nspkg/src/parent/namedpkg/parent.py
|
jeremysanders/pyinstaller
|
321b24f9a9a5978337735816b36ca6b4a90a2fb4
|
[
"Apache-2.0"
] | 5,150
|
2015-01-01T12:09:56.000Z
|
2022-03-31T18:06:12.000Z
|
tests/unit/test_modulegraph/testdata/nspkg/src/parent/namedpkg/parent.py
|
jeremysanders/pyinstaller
|
321b24f9a9a5978337735816b36ca6b4a90a2fb4
|
[
"Apache-2.0"
] | 2,101
|
2015-01-03T10:25:27.000Z
|
2022-03-30T11:04:42.000Z
|
""" parent packages """
import sys
| 11.666667
| 23
| 0.657143
| 4
| 35
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 35
| 2
| 24
| 17.5
| 0.793103
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
dbc23709c7cec8f99f3984d24ca2dcaf6002878b
| 191
|
py
|
Python
|
statsmodels/sandbox/tools/__init__.py
|
yarikoptic/statsmodels
|
f990cb1a1ef0c9883c9394444e6f9d027efabec6
|
[
"BSD-3-Clause"
] | 34
|
2018-07-13T11:30:46.000Z
|
2022-01-05T13:48:10.000Z
|
venv/lib/python3.6/site-packages/statsmodels/sandbox/tools/__init__.py
|
HeyWeiPan/vnpy_crypto
|
844381797a475a01c05a4e162592a5a6e3a48032
|
[
"MIT"
] | 7
|
2015-11-20T08:33:04.000Z
|
2020-07-24T19:34:39.000Z
|
venv/lib/python3.6/site-packages/statsmodels/sandbox/tools/__init__.py
|
HeyWeiPan/vnpy_crypto
|
844381797a475a01c05a4e162592a5a6e3a48032
|
[
"MIT"
] | 28
|
2015-04-01T20:02:25.000Z
|
2021-07-03T00:09:28.000Z
|
'''some helper functions for principal component and time series analysis
Status
------
pca : tested against matlab
pcasvd : tested against matlab
'''
from .tools_pca import * #pca, pcasvd
| 17.363636
| 72
| 0.73822
| 25
| 191
| 5.6
| 0.76
| 0.185714
| 0.271429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.167539
| 191
| 10
| 73
| 19.1
| 0.880503
| 0.816754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
91a69bf2e3b09b5c2d2ab9c4930e98ed67a67ef3 | 181 | py | Python |
graphPlot/__init__.py | francisp336/graphPlot | 2e12535aebf39b65c93a16fe8de9b657555392e8 | ["MIT"] | null | null | null |
graphPlot/__init__.py | francisp336/graphPlot | 2e12535aebf39b65c93a16fe8de9b657555392e8 | ["MIT"] | 1 | 2020-09-29T22:01:56.000Z | 2020-09-29T22:01:56.000Z |
graphPlot/__init__.py | francisp336/graphPlot | 2e12535aebf39b65c93a16fe8de9b657555392e8 | ["MIT"] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as ptc
import random as r
from .graphPlot import *
# def _unit_test():
# TODO define unit test
| 13.923077
| 32
| 0.745856
| 28
| 181
| 4.75
| 0.642857
| 0.240602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.20442
| 181
| 12
| 33
| 15.083333
| 0.923611
| 0.21547
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
91e6edfa77d27dad68c6edfea1331fb3d2c2dab7 | 32,178 | py | Python |
Tests/scripts/hook_validations/tests/integration_test.py | TBE-Comp/content | 3a27b1c779dec1e4ee918ca2da77538238dd10f0 | ["MIT"] | 7 | 2020-09-24T22:38:01.000Z | 2021-07-14T15:58:35.000Z |
Tests/scripts/hook_validations/tests/integration_test.py | TBE-Comp/content | 3a27b1c779dec1e4ee918ca2da77538238dd10f0 | ["MIT"] | 7 | 2021-03-25T23:09:39.000Z | 2021-09-23T23:27:14.000Z |
Tests/scripts/hook_validations/tests/integration_test.py | TBE-Comp/content | 3a27b1c779dec1e4ee918ca2da77538238dd10f0 | ["MIT"] | 2 | 2020-12-08T17:03:33.000Z | 2021-07-13T18:32:06.000Z |
from Tests.scripts.hook_validations.integration import IntegrationValidator
def test_removed_docker_image_on_existing_integration():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {
"script": {
"dockerimage": "test"
}
}
validator.current_integration = {
"script": {
"no": "dockerimage"
}
}
assert validator.is_docker_image_changed(), "The script validator couldn't find the docker image as changed"
def test_updated_docker_image_on_existing_integration():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {
"script": {
"dockerimage": "test"
}
}
validator.current_integration = {
"script": {
"dockerimage": "test1"
}
}
assert validator.is_docker_image_changed(), "The script validator couldn't find the docker image as changed"
def test_not_changed_docker_image_on_existing_integration():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {}
validator.current_integration = {}
assert validator.is_docker_image_changed() is False, "The script validator couldn't find the docker "\
"image as changed"
def test_added_docker_image_on_existing_integration():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {}
validator.current_integration = {
"script": {
"dockerimage": "test1"
}
}
assert validator.is_docker_image_changed(), "The script validator couldn't find the docker image as changed"
def test_added_required_field_in_integration():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {
"configuration": [
{
"name": "test",
"required": False
}
]
}
validator.current_integration = {
"configuration": [
{
"name": "test",
"required": True
}
]
}
assert validator.is_added_required_fields(), "The script validator couldn't find the new required fields"
def test_changed_required_field_to_not_required_in_integration():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {
"configuration": [
{
"name": "test",
"required": True
}
]
}
validator.current_integration = {
"configuration": [
{
"name": "test",
"required": False
}
]
}
assert validator.is_added_required_fields() is False, "The script validator found the change to not reuquired " \
"as a one who breaks backward compatability"
def test_not_changed_required_field_in_integration():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {
"configuration": [
{
"name": "test",
"required": True
}
]
}
validator.current_integration = {
"configuration": [
{
"name": "test",
"required": True
}
]
}
assert validator.is_added_required_fields() is False, "The script validator found a backward compatability " \
"change although no such change was done"
def test_not_changed_required_field_scenario2_in_integration():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {
"configuration": [
{
"name": "test",
"required": False
}
]
}
validator.current_integration = {
"configuration": [
{
"name": "test",
"required": False
}
]
}
assert validator.is_added_required_fields() is False, "The script validator found a backward compatability " \
"change although no such change was done"
def test_configuration_extraction():
validator = IntegrationValidator("temp_file", check_git=False)
integration_json = {
"configuration": [
{
"name": "test",
"required": False
},
{
"name": "test1",
"required": True
}
]
}
expected = {
"test": False,
"test1": True
}
assert validator._get_field_to_required_dict(integration_json) == expected, "Failed to extract configuration"
def test_not_changed_context_in_integration():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {
"commands": [
{
"name": "test",
"outputs": [
{
"contextPath": "test"
}
]
}
]
}
validator.current_integration = {
"commands": [
{
"name": "test",
"outputs": [
{
"contextPath": "test"
}
]
}
]
}
assert validator.is_changed_context_path() is False, "The script validator found a backward compatability " \
"change although no such change was done"
def test_changed_context_in_integration():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {
"script": {
"commands": [
{
"name": "test",
"outputs": [
{
"contextPath": "test"
}
]
}
]
}
}
validator.current_integration = {
"script": {
"commands": [
{
"name": "test",
"outputs": [
{
"contextPath": "changed that"
}
]
}
]
}
}
assert validator.is_changed_context_path(), "The script validator didn't find a backward compatability " \
"issue although the context path has changed"
def test_added_context_in_integration():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {
"commands": [
{
"name": "test",
"outputs": [
{
"contextPath": "test"
}
]
}
]
}
validator.current_integration = {
"commands": [
{
"name": "test",
"outputs": [
{
"contextPath": "test"
},
{
"contextPath": "changed that"
}
]
}
]
}
assert validator.is_changed_context_path() is False, "The script validator didn't find a backward compatability " \
"issue although the context path has changed"
def test_added_new_command_context_path_in_integration():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {
"commands": [
{
"name": "test",
"outputs": [
{
"contextPath": "test"
}
]
}
]
}
validator.current_integration = {
"commands": [
{
"name": "test",
"outputs": [
{
"contextPath": "test"
}
]
},
{
"name": "test2",
"outputs": [
{
"contextPath": "new command"
}
]
}
]
}
assert validator.is_changed_context_path() is False, "The script validator found a backward compatibility " \
"issue although the context path has not changed"
def test_changed_required_arg_for_command_in_integration():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {
"script": {
"commands": [
{
"name": "test",
"arguments": [
{
"name": "test"
}
]
}
]
}
}
validator.current_integration = {
"script": {
"commands": [
{
"name": "test",
"arguments": [
{
"name": "test",
"required": True
}
]
}
]
}
}
assert validator.is_changed_command_name_or_arg(), "The script validator did not found a backward compatibility " \
"issue although the command was added with required arg"
def test_added_required_arg_for_command_in_integration():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {
"script": {
"commands": [
{
"name": "test",
"arguments": [
{
"name": "test"
}
]
}
]
}
}
validator.current_integration = {
"script": {
"commands": [
{
"name": "test",
"arguments": [
{
"name": "test",
},
{
"name": "test1",
"required": True
}
]
}
]
}
}
assert validator.is_changed_command_name_or_arg(), "The script validator did not found a backward compatibility " \
"issue although the command was added with required arg"
def test_renamed_arg_in_command_in_integration():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {
"script": {
"commands": [
{
"name": "test",
"arguments": [
{
"name": "test"
}
]
}
]
}
}
validator.current_integration = {
"script": {
"commands": [
{
"name": "test",
"arguments": [
{
"name": "test1",
}
]
}
]
}
}
assert validator.is_changed_command_name_or_arg(), "The script validator did not found a backward compatibility " \
"issue although the command args were renamed"
def test_not_requires_arg_in_command_in_integration():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {
"commands": [
{
"name": "test",
"arguments": [
{
"name": "test"
}
]
}
]
}
validator.current_integration = {
"commands": [
{
"name": "test",
"arguments": [
{
"name": "test"
},
{
"name": "test1",
}
]
}
]
}
assert validator.is_changed_command_name_or_arg() is False, "The script validator found a backward compatibility " \
"issue although a new not required command was added"
def test_not_changed_command_in_integration():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {
"script": {
"commands": [
{
"name": "test",
"arguments": [
{
"name": "test"
}
]
}
]
}
}
validator.current_integration = {
"script": {
"commands": [
{
"name": "test",
"arguments": [
{
"name": "test"
}
]
}
]
}
}
assert validator.is_changed_command_name_or_arg() is False, "The script validator found a backward compatibility " \
"issue although the commands haven't changed"
def test_no_duplicate_params():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"configuration": [
{
"name": "test"
},
{
"name": "tes1",
}
]
}
assert validator.is_there_duplicate_params() is False, \
"The integration validator found duplicated params although there are none"
def test_duplicated_params():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"configuration": [
{
"name": "test"
},
{
"name": "test",
}
]
}
assert validator.is_there_duplicate_params(), \
"The integration validator did not find duplicated params although there are duplicates"
def test_no_duplicate_args():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"script": {
"commands": [
{
"name": "testing",
"arguments": [
{
"name": "test1"
},
{
"name": "test2"
}
]
}
]
}
}
assert validator.is_there_duplicate_args() is False, \
"The integration validator found duplicated args although there are none"
def test_duplicated_argss():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"script": {
"commands": [
{
"name": "testing",
"arguments": [
{
"name": "test"
},
{
"name": "test"
}
]
}
]
}
}
assert validator.is_there_duplicate_args(), \
"The integration validator did not find duplicated args although there are duplicates"
def test_is_changed_subtype_non_changed():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"script": {
"type": "python",
"subtype": "python3"
}
}
validator.old_integration = {
"script": {
"type": "python",
"subtype": "python3"
}
}
assert validator.is_changed_subtype(), \
"The integration validator found changed subtype while it is valid"
def test_is_changed_subtype_changed():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"script": {
"type": "python",
"subtype": "python3"
}
}
validator.old_integration = {
"script": {
"type": "python",
"subtype": "python2"
}
}
assert validator.is_changed_subtype() is False, \
"The integration validator did not find changed subtype while it is changed"
def test_valid_subtype_lies():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"script": {
"type": "python",
"subtype": "lies"
}
}
validator.old_integration = None
assert validator.is_valid_subtype() is False, \
"The integration validator found valid subtype while it is invalid"
def test_is_default_arguments_non_default():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"script": {
"commands": [
{
"name": "file",
"arguments": [
{
"name": "file",
"required": True,
"default": False
},
{
"name": "verbose"
}
]
}
]
}
}
validator.old_integration = None
assert validator.is_default_arguments() is False, \
"The integration validator did not find invalid arg (needed to be default and not required)"
def test_is_default_arguments_ok():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"script": {
"commands": [
{
"name": "email",
"arguments": [
{
"name": "email",
"required": False,
"default": True
},
{
"name": "verbose"
}
]
}
]
}
}
validator.old_integration = None
assert validator.is_default_arguments() is True, \
"The integration validator found an invalid command arg while it is valid"
def test_is_outputs_for_reputations_commands_valid():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"script": {
"commands": [
{
"name": "panorama-commit-status",
"outputs": [
{
"contextPath": "Panorama.Commit.JobID",
"description": "Job ID of the configuration to be committed",
"type": "number"
},
{
"contextPath": "DBotScore.does.not.matter"
}
]
}
]
}
}
validator.old_integration = None
assert validator.is_outputs_for_reputations_commands_valid() is True, \
"The integration validator found invalid command outputs while it is valid"
validator_email = IntegrationValidator("temp_file", check_git=False)
validator_email.current_integration = {
"script": {
"commands": [
{
"name": "email",
"outputs": [
{
"contextPath": "DBotScore.Indicator",
"description": "The indicator that was tested.",
"type": "string"
},
{
"contextPath": "DBotScore.Type",
"description": "The indicator type.",
"type": "string"
},
{
"contextPath": "DBotScore.Vendor",
"description": "Vendor used to calculate the score.",
"type": "string"
},
{
"contextPath": "DBotScore.Sc0re",
"description": "The actual score.",
"type": "int"
},
{
"contextPath": "Email.To",
"description": "email to",
"type": "string"
},
]
}
]
}
}
validator_email.old_integration = None
assert validator_email.is_outputs_for_reputations_commands_valid() is False, \
"The integration validator did not find the invalid command output - DBotScore.Sc0re"
validator_file = IntegrationValidator("temp_file", check_git=False)
validator_file.current_integration = {
"script": {
"commands": [
{
"name": "file",
"outputs": [
{
"contextPath": "DBotScore.Indicator",
"description": "The indicator that was tested.",
"type": "string"
},
{
"contextPath": "DBotScore.Type",
"description": "The indicator type.",
"type": "string"
},
{
"contextPath": "DBotScore.Vendor",
"description": "Vendor used to calculate the score.",
"type": "string"
},
{
"contextPath": "DBotScore.Score",
"description": "The actual score.",
"type": "int"
},
{
"contextPath": "File.Md5",
"description": "The MD5 hash of the file.",
"type": "string"
},
]
}
]
}
}
validator_file.old_integration = None
assert validator_file.is_outputs_for_reputations_commands_valid() is False, \
"The integration validator did not find the invalid command output - File.Md5"
validator_ip = IntegrationValidator("temp_file", check_git=False)
validator_ip.current_integration = {
"script": {
"commands": [
{
"name": "ip",
"outputs": [
{
"contextPath": "DBotScore.Indicator",
"description": "The indicator that was tested.",
"type": "string"
},
{
"contextPath": "DBotScore.Type",
"description": "The indicator type.",
"type": "string"
},
{
"contextPath": "DBotScore.Vendor",
"description": "Vendor used to calculate the score.",
"type": "string"
},
{
"contextPath": "DBotScore.Score",
"description": "The actual score.",
"type": "int"
},
{
"contextPath": "IP.Address",
"description": "IP address",
"type": "string"
},
]
}
]
}
}
validator_ip.old_integration = None
assert validator_ip.is_outputs_for_reputations_commands_valid() is True, \
"The integration validator found invalid command outputs while it is valid"
def test_valid_new_beta_integration():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {}
validator.current_integration = {
"commonfields": {
"id": "newIntegration"
},
"name": "newIntegration",
"display": "newIntegration (Beta)",
"beta": True,
}
assert validator.is_valid_beta_integration(is_new=True) is True, \
"The Beta validator did not validate a new valid integration"
def test_new_beta_integration_missing_beta_in_display():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {}
validator.current_integration = {
"commonfields": {
"id": "newIntegration"
},
"name": "newIntegration",
"display": "newIntegration",
"beta": True,
}
assert validator.is_valid_beta_integration(is_new=True) is False, \
"The Beta validator approved the integration" \
"but it should have fail it for missing beta substring in 'display' field"
def test_new_beta_integration_with_beta_substring_in_id():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {}
validator.current_integration = {
"commonfields": {
"id": "newIntegration beta"
},
"name": "newIntegration",
"display": "newIntegration (Beta)",
"beta": True,
}
assert validator.is_valid_beta_integration(is_new=True) is False, \
"The beta validator approved the new beta integration," \
" but it should fail it because the 'id' field has a 'beta' substring in it. " \
"the validator should not allow it for new integration"
def test_new_beta_integration_with_beta_substring_in_name():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {}
validator.current_integration = {
"commonfields": {
"id": "newIntegration"
},
"name": "newIntegration beta",
"display": "newIntegration (Beta)",
"beta": True,
}
assert validator.is_valid_beta_integration(is_new=True) is False, \
"The beta validator approved the new beta integration," \
" but it should fail it because the 'name' field has a 'beta' substring in it. " \
"the validator should not allow it for new integration"
def test_cahnged_beta_integration_with_beta_substring_in_is_and_name():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {
"commonfields": {
"id": "newIntegration beta"
},
"name": "newIntegration beta",
"display": "newIntegration (Beta)",
"beta": True,
}
validator.current_integration = {
"commonfields": {
"id": "newIntegration beta"
},
"name": "newIntegration beta",
"display": "newIntegration changed (Beta)",
"beta": True,
}
assert validator.is_valid_beta_integration() is True, \
"The Beta validator failed the integration" \
"but it should have approved"
def test_changed_beta_integration_without_beta_field():
validator = IntegrationValidator("temp_file", check_git=False)
validator.old_integration = {
"commonfields": {
"id": "newIntegration beta"
},
"name": "newIntegration beta",
"display": "newIntegration (Beta)",
}
validator.current_integration = {
"commonfields": {
"id": "newIntegration beta"
},
"name": "newIntegration beta",
"display": "newIntegration changed (Beta)",
}
assert validator.is_valid_beta_integration() is False, \
"The Beta validator approved the integration" \
"but it should have fail it because it is missing 'beta' field with the value true"
def test_proxy_sanity_check():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"configuration": [
{
"name": "proxy",
"type": 8,
"display": "Use system proxy settings",
"required": False
}
]
}
assert validator.is_proxy_configured_correctly()
def test_proxy_wrong_type():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"configuration": [
{
"name": "proxy",
"type": 9,
"display": "Use system proxy settings",
"required": False
}
]
}
assert validator.is_proxy_configured_correctly() is False
def test_proxy_wrong_display():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"configuration": [
{
"name": "proxy",
"type": 8,
"display": "bla",
"required": False
}
]
}
assert validator.is_proxy_configured_correctly() is False
def test_proxy_wrong_required():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"configuration": [
{
"name": "proxy",
"type": 8,
"display": "Use system proxy settings",
"required": True
}
]
}
assert validator.is_proxy_configured_correctly() is False
def test_insecure_wrong_display():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"configuration": [
{
"name": "insecure",
"type": 8,
"display": "Use system proxy settings",
"required": False
}
]
}
assert validator.is_insecure_configured_correctly() is False
def test_unsecure_wrong_display():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"configuration": [
{
"name": "unsecure",
"type": 8,
"display": "Use system proxy settings",
"required": False
}
]
}
assert validator.is_insecure_configured_correctly() is False
def test_unsecure_correct_display():
validator = IntegrationValidator("temp_file", check_git=False)
validator.current_integration = {
"configuration": [
{
"name": "unsecure",
"type": 8,
"display": "Trust any certificate (not secure)",
"required": False
}
]
}
assert validator.is_insecure_configured_correctly()
def test_is_valid_category():
validator_siem = IntegrationValidator("temp_file", check_git=False)
validator_siem.current_integration = {"category": "Analytics & SIEMM"}
assert validator_siem.is_valid_category() is False
validator_endpoint = IntegrationValidator("temp_file", check_git=False)
validator_endpoint.current_integration = {"category": "Endpoint"}
assert validator_endpoint.is_valid_category()
| 30.101029
| 120
| 0.478153
| 2,417
| 32,178
| 6.136533
| 0.078196
| 0.045307
| 0.086839
| 0.102346
| 0.874865
| 0.847087
| 0.809668
| 0.755663
| 0.741437
| 0.718379
| 0
| 0.001519
| 0.427217
| 32,178
| 1,068
| 121
| 30.129213
| 0.803212
| 0
| 0
| 0.459721
| 0
| 0
| 0.241531
| 0.002113
| 0
| 0
| 0
| 0
| 0.049409
| 1
| 0.045113
| false
| 0
| 0.001074
| 0
| 0.046187
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
72755bc746d04b3e5d1fb511a7512ba186454e41 | 25,748 | py | Python |
sunshine_conversations_client/__init__.py | zendesk/sunshine-conversations-python | 2d0240681b809ffd8ff0e9ed58b33aae844d29f6 | ["Apache-2.0"] | 4 | 2020-09-27T14:28:25.000Z | 2022-02-02T13:51:29.000Z |
sunshine_conversations_client/__init__.py | zendesk/sunshine-conversations-python | 2d0240681b809ffd8ff0e9ed58b33aae844d29f6 | ["Apache-2.0"] | 3 | 2021-09-30T18:18:58.000Z | 2021-12-04T07:55:23.000Z |
sunshine_conversations_client/__init__.py | zendesk/sunshine-conversations-python | 2d0240681b809ffd8ff0e9ed58b33aae844d29f6 | ["Apache-2.0"] | 5 | 2020-11-07T02:08:18.000Z | 2021-12-07T17:10:23.000Z |
# coding: utf-8
# flake8: noqa
"""
Sunshine Conversations API
The version of the OpenAPI document: 9.4.5
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
__version__ = "9.4.6"
# import apis into sdk package
from sunshine_conversations_client.api.activities_api import ActivitiesApi
from sunshine_conversations_client.api.app_keys_api import AppKeysApi
from sunshine_conversations_client.api.apps_api import AppsApi
from sunshine_conversations_client.api.attachments_api import AttachmentsApi
from sunshine_conversations_client.api.clients_api import ClientsApi
from sunshine_conversations_client.api.conversations_api import ConversationsApi
from sunshine_conversations_client.api.custom_integration_api_keys_api import CustomIntegrationApiKeysApi
from sunshine_conversations_client.api.integrations_api import IntegrationsApi
from sunshine_conversations_client.api.messages_api import MessagesApi
from sunshine_conversations_client.api.o_auth_endpoints_api import OAuthEndpointsApi
from sunshine_conversations_client.api.participants_api import ParticipantsApi
from sunshine_conversations_client.api.switchboard_actions_api import SwitchboardActionsApi
from sunshine_conversations_client.api.switchboard_integrations_api import SwitchboardIntegrationsApi
from sunshine_conversations_client.api.switchboards_api import SwitchboardsApi
from sunshine_conversations_client.api.users_api import UsersApi
from sunshine_conversations_client.api.webhooks_api import WebhooksApi
# import ApiClient
from sunshine_conversations_client.api_client import ApiClient
from sunshine_conversations_client.configuration import Configuration
from sunshine_conversations_client.exceptions import OpenApiException
from sunshine_conversations_client.exceptions import ApiTypeError
from sunshine_conversations_client.exceptions import ApiValueError
from sunshine_conversations_client.exceptions import ApiKeyError
from sunshine_conversations_client.exceptions import ApiException
# import models into sdk package
from sunshine_conversations_client.model.accept_control_body import AcceptControlBody
from sunshine_conversations_client.model.action import Action
from sunshine_conversations_client.model.action_subset import ActionSubset
from sunshine_conversations_client.model.activity import Activity
from sunshine_conversations_client.model.activity_all_of import ActivityAllOf
from sunshine_conversations_client.model.activity_post import ActivityPost
from sunshine_conversations_client.model.activity_post_all_of import ActivityPostAllOf
from sunshine_conversations_client.model.activity_types import ActivityTypes
from sunshine_conversations_client.model.android import Android
from sunshine_conversations_client.model.android_all_of import AndroidAllOf
from sunshine_conversations_client.model.android_update import AndroidUpdate
from sunshine_conversations_client.model.android_update_all_of import AndroidUpdateAllOf
from sunshine_conversations_client.model.api_key import ApiKey
from sunshine_conversations_client.model.app import App
from sunshine_conversations_client.model.app_create_body import AppCreateBody
from sunshine_conversations_client.model.app_key import AppKey
from sunshine_conversations_client.model.app_key_create_body import AppKeyCreateBody
from sunshine_conversations_client.model.app_key_list_response import AppKeyListResponse
from sunshine_conversations_client.model.app_key_response import AppKeyResponse
from sunshine_conversations_client.model.app_list_filter import AppListFilter
from sunshine_conversations_client.model.app_list_response import AppListResponse
from sunshine_conversations_client.model.app_response import AppResponse
from sunshine_conversations_client.model.app_settings import AppSettings
from sunshine_conversations_client.model.app_sub_schema import AppSubSchema
from sunshine_conversations_client.model.app_update_body import AppUpdateBody
from sunshine_conversations_client.model.apple import Apple
from sunshine_conversations_client.model.apple_all_of import AppleAllOf
from sunshine_conversations_client.model.apple_update import AppleUpdate
from sunshine_conversations_client.model.attachment_delete_body import AttachmentDeleteBody
from sunshine_conversations_client.model.attachment_media_token_body import AttachmentMediaTokenBody
from sunshine_conversations_client.model.attachment_media_token_response import AttachmentMediaTokenResponse
from sunshine_conversations_client.model.attachment_response import AttachmentResponse
from sunshine_conversations_client.model.attachment_schema import AttachmentSchema
from sunshine_conversations_client.model.attachment_upload_body import AttachmentUploadBody
from sunshine_conversations_client.model.author import Author
from sunshine_conversations_client.model.author_webhook import AuthorWebhook
from sunshine_conversations_client.model.buy import Buy
from sunshine_conversations_client.model.carousel_message import CarouselMessage
from sunshine_conversations_client.model.carousel_message_display_settings import CarouselMessageDisplaySettings
from sunshine_conversations_client.model.client import Client
from sunshine_conversations_client.model.client_add_event import ClientAddEvent
from sunshine_conversations_client.model.client_add_event_all_of import ClientAddEventAllOf
from sunshine_conversations_client.model.client_add_event_all_of_payload import ClientAddEventAllOfPayload
from sunshine_conversations_client.model.client_association import ClientAssociation
from sunshine_conversations_client.model.client_create import ClientCreate
from sunshine_conversations_client.model.client_list_response import ClientListResponse
from sunshine_conversations_client.model.client_remove_event import ClientRemoveEvent
from sunshine_conversations_client.model.client_remove_event_all_of import ClientRemoveEventAllOf
from sunshine_conversations_client.model.client_remove_event_all_of_payload import ClientRemoveEventAllOfPayload
from sunshine_conversations_client.model.client_response import ClientResponse
from sunshine_conversations_client.model.client_type import ClientType
from sunshine_conversations_client.model.client_update_event import ClientUpdateEvent
from sunshine_conversations_client.model.client_update_event_all_of import ClientUpdateEventAllOf
from sunshine_conversations_client.model.client_update_event_all_of_payload import ClientUpdateEventAllOfPayload
from sunshine_conversations_client.model.confirmation import Confirmation
from sunshine_conversations_client.model.content import Content
from sunshine_conversations_client.model.conversation import Conversation
from sunshine_conversations_client.model.conversation_all_of import ConversationAllOf
from sunshine_conversations_client.model.conversation_create_body import ConversationCreateBody
from sunshine_conversations_client.model.conversation_create_event import ConversationCreateEvent
from sunshine_conversations_client.model.conversation_create_event_all_of import ConversationCreateEventAllOf
from sunshine_conversations_client.model.conversation_create_event_all_of_payload import ConversationCreateEventAllOfPayload
from sunshine_conversations_client.model.conversation_join_event import ConversationJoinEvent
from sunshine_conversations_client.model.conversation_join_event_all_of import ConversationJoinEventAllOf
from sunshine_conversations_client.model.conversation_join_event_all_of_payload import ConversationJoinEventAllOfPayload
from sunshine_conversations_client.model.conversation_leave_event import ConversationLeaveEvent
from sunshine_conversations_client.model.conversation_leave_event_all_of import ConversationLeaveEventAllOf
from sunshine_conversations_client.model.conversation_leave_event_all_of_payload import ConversationLeaveEventAllOfPayload
from sunshine_conversations_client.model.conversation_list_filter import ConversationListFilter
from sunshine_conversations_client.model.conversation_list_response import ConversationListResponse
from sunshine_conversations_client.model.conversation_message_delivery_channel_event import ConversationMessageDeliveryChannelEvent
from sunshine_conversations_client.model.conversation_message_delivery_channel_event_all_of import ConversationMessageDeliveryChannelEventAllOf
from sunshine_conversations_client.model.conversation_message_delivery_failure_event import ConversationMessageDeliveryFailureEvent
from sunshine_conversations_client.model.conversation_message_delivery_failure_event_all_of import ConversationMessageDeliveryFailureEventAllOf
from sunshine_conversations_client.model.conversation_message_delivery_payload import ConversationMessageDeliveryPayload
from sunshine_conversations_client.model.conversation_message_delivery_payload_destination import ConversationMessageDeliveryPayloadDestination
from sunshine_conversations_client.model.conversation_message_delivery_payload_external_messages import ConversationMessageDeliveryPayloadExternalMessages
from sunshine_conversations_client.model.conversation_message_delivery_payload_message import ConversationMessageDeliveryPayloadMessage
from sunshine_conversations_client.model.conversation_message_delivery_user_event import ConversationMessageDeliveryUserEvent
from sunshine_conversations_client.model.conversation_message_event import ConversationMessageEvent
from sunshine_conversations_client.model.conversation_message_event_all_of import ConversationMessageEventAllOf
from sunshine_conversations_client.model.conversation_message_event_all_of_payload import ConversationMessageEventAllOfPayload
from sunshine_conversations_client.model.conversation_postback_event import ConversationPostbackEvent
from sunshine_conversations_client.model.conversation_postback_event_all_of import ConversationPostbackEventAllOf
from sunshine_conversations_client.model.conversation_postback_event_all_of_payload import ConversationPostbackEventAllOfPayload
from sunshine_conversations_client.model.conversation_read_event import ConversationReadEvent
from sunshine_conversations_client.model.conversation_read_event_all_of import ConversationReadEventAllOf
from sunshine_conversations_client.model.conversation_read_event_all_of_payload import ConversationReadEventAllOfPayload
from sunshine_conversations_client.model.conversation_remove_event import ConversationRemoveEvent
from sunshine_conversations_client.model.conversation_remove_event_all_of import ConversationRemoveEventAllOf
from sunshine_conversations_client.model.conversation_remove_event_all_of_payload import ConversationRemoveEventAllOfPayload
from sunshine_conversations_client.model.conversation_response import ConversationResponse
from sunshine_conversations_client.model.conversation_truncated import ConversationTruncated
from sunshine_conversations_client.model.conversation_type import ConversationType
from sunshine_conversations_client.model.conversation_typing_event import ConversationTypingEvent
from sunshine_conversations_client.model.conversation_typing_event_all_of import ConversationTypingEventAllOf
from sunshine_conversations_client.model.conversation_typing_event_all_of_payload import ConversationTypingEventAllOfPayload
from sunshine_conversations_client.model.conversation_update_body import ConversationUpdateBody
from sunshine_conversations_client.model.custom import Custom
from sunshine_conversations_client.model.custom_all_of import CustomAllOf
from sunshine_conversations_client.model.custom_update import CustomUpdate
from sunshine_conversations_client.model.destination import Destination
from sunshine_conversations_client.model.device import Device
from sunshine_conversations_client.model.event_sub_schema import EventSubSchema
from sunshine_conversations_client.model.extra_channel_options import ExtraChannelOptions
from sunshine_conversations_client.model.extra_channel_options_messenger import ExtraChannelOptionsMessenger
from sunshine_conversations_client.model.field import Field
from sunshine_conversations_client.model.file_message import FileMessage
from sunshine_conversations_client.model.form_message import FormMessage
from sunshine_conversations_client.model.form_response_message import FormResponseMessage
from sunshine_conversations_client.model.image_message import ImageMessage
from sunshine_conversations_client.model.inline_object import InlineObject
from sunshine_conversations_client.model.instagram import Instagram
from sunshine_conversations_client.model.instagram_all_of import InstagramAllOf
from sunshine_conversations_client.model.instagram_update import InstagramUpdate
from sunshine_conversations_client.model.instagram_update_all_of import InstagramUpdateAllOf
from sunshine_conversations_client.model.integration import Integration
from sunshine_conversations_client.model.integration_api_key import IntegrationApiKey
from sunshine_conversations_client.model.integration_api_key_list_response import IntegrationApiKeyListResponse
from sunshine_conversations_client.model.integration_api_key_response import IntegrationApiKeyResponse
from sunshine_conversations_client.model.integration_id import IntegrationId
from sunshine_conversations_client.model.integration_list_filter import IntegrationListFilter
from sunshine_conversations_client.model.integration_list_response import IntegrationListResponse
from sunshine_conversations_client.model.integration_response import IntegrationResponse
from sunshine_conversations_client.model.integration_type import IntegrationType
from sunshine_conversations_client.model.integration_update import IntegrationUpdate
from sunshine_conversations_client.model.integration_update_base import IntegrationUpdateBase
from sunshine_conversations_client.model.ios import Ios
from sunshine_conversations_client.model.ios_all_of import IosAllOf
from sunshine_conversations_client.model.ios_update import IosUpdate
from sunshine_conversations_client.model.ios_update_all_of import IosUpdateAllOf
from sunshine_conversations_client.model.item import Item
from sunshine_conversations_client.model.line import Line
from sunshine_conversations_client.model.line_all_of import LineAllOf
from sunshine_conversations_client.model.line_update import LineUpdate
from sunshine_conversations_client.model.link import Link
from sunshine_conversations_client.model.links import Links
from sunshine_conversations_client.model.list_message import ListMessage
from sunshine_conversations_client.model.location_message import LocationMessage
from sunshine_conversations_client.model.location_message_coordinates import LocationMessageCoordinates
from sunshine_conversations_client.model.location_message_location import LocationMessageLocation
from sunshine_conversations_client.model.location_request import LocationRequest
from sunshine_conversations_client.model.mailgun import Mailgun
from sunshine_conversations_client.model.mailgun_all_of import MailgunAllOf
from sunshine_conversations_client.model.mailgun_update import MailgunUpdate
from sunshine_conversations_client.model.mailgun_update_all_of import MailgunUpdateAllOf
from sunshine_conversations_client.model.match_criteria import MatchCriteria
from sunshine_conversations_client.model.match_criteria_base import MatchCriteriaBase
from sunshine_conversations_client.model.match_criteria_mailgun import MatchCriteriaMailgun
from sunshine_conversations_client.model.match_criteria_mailgun_all_of import MatchCriteriaMailgunAllOf
from sunshine_conversations_client.model.match_criteria_messagebird import MatchCriteriaMessagebird
from sunshine_conversations_client.model.match_criteria_messagebird_all_of import MatchCriteriaMessagebirdAllOf
from sunshine_conversations_client.model.match_criteria_twilio import MatchCriteriaTwilio
from sunshine_conversations_client.model.match_criteria_twilio_all_of import MatchCriteriaTwilioAllOf
from sunshine_conversations_client.model.match_criteria_whatsapp import MatchCriteriaWhatsapp
from sunshine_conversations_client.model.match_criteria_whatsapp_all_of import MatchCriteriaWhatsappAllOf
from sunshine_conversations_client.model.message import Message
from sunshine_conversations_client.model.message_bird_update import MessageBirdUpdate
from sunshine_conversations_client.model.message_list_response import MessageListResponse
from sunshine_conversations_client.model.message_override import MessageOverride
from sunshine_conversations_client.model.message_override_apple import MessageOverrideApple
from sunshine_conversations_client.model.message_override_line import MessageOverrideLine
from sunshine_conversations_client.model.message_override_messenger import MessageOverrideMessenger
from sunshine_conversations_client.model.message_override_payload import MessageOverridePayload
from sunshine_conversations_client.model.message_override_whatsapp import MessageOverrideWhatsapp
from sunshine_conversations_client.model.message_post import MessagePost
from sunshine_conversations_client.model.message_post_response import MessagePostResponse
from sunshine_conversations_client.model.message_webhook import MessageWebhook
from sunshine_conversations_client.model.messagebird import Messagebird
from sunshine_conversations_client.model.messagebird_all_of import MessagebirdAllOf
from sunshine_conversations_client.model.messenger import Messenger
from sunshine_conversations_client.model.messenger_all_of import MessengerAllOf
from sunshine_conversations_client.model.messenger_update import MessengerUpdate
from sunshine_conversations_client.model.meta import Meta
from sunshine_conversations_client.model.offer_control_body import OfferControlBody
from sunshine_conversations_client.model.page import Page
from sunshine_conversations_client.model.participant import Participant
from sunshine_conversations_client.model.participant_join_body import ParticipantJoinBody
from sunshine_conversations_client.model.participant_leave_body import ParticipantLeaveBody
from sunshine_conversations_client.model.participant_leave_body_participant_id import ParticipantLeaveBodyParticipantId
from sunshine_conversations_client.model.participant_leave_body_user_external_id import ParticipantLeaveBodyUserExternalId
from sunshine_conversations_client.model.participant_leave_body_user_id import ParticipantLeaveBodyUserId
from sunshine_conversations_client.model.participant_list_response import ParticipantListResponse
from sunshine_conversations_client.model.participant_response import ParticipantResponse
from sunshine_conversations_client.model.participant_sub_schema import ParticipantSubSchema
from sunshine_conversations_client.model.participant_with_user_external_id import ParticipantWithUserExternalId
from sunshine_conversations_client.model.participant_with_user_id import ParticipantWithUserId
from sunshine_conversations_client.model.pass_control_body import PassControlBody
from sunshine_conversations_client.model.postback import Postback
from sunshine_conversations_client.model.postback_webhook import PostbackWebhook
from sunshine_conversations_client.model.prechat_capture import PrechatCapture
from sunshine_conversations_client.model.profile import Profile
from sunshine_conversations_client.model.quoted_message import QuotedMessage
from sunshine_conversations_client.model.quoted_message_external_message_id import QuotedMessageExternalMessageId
from sunshine_conversations_client.model.quoted_message_message import QuotedMessageMessage
from sunshine_conversations_client.model.referral import Referral
from sunshine_conversations_client.model.referral_details import ReferralDetails
from sunshine_conversations_client.model.reply import Reply
from sunshine_conversations_client.model.source import Source
from sunshine_conversations_client.model.source_webhook import SourceWebhook
from sunshine_conversations_client.model.status import Status
from sunshine_conversations_client.model.switchboard import Switchboard
from sunshine_conversations_client.model.switchboard_accept_control import SwitchboardAcceptControl
from sunshine_conversations_client.model.switchboard_accept_control_all_of import SwitchboardAcceptControlAllOf
from sunshine_conversations_client.model.switchboard_accept_control_all_of_payload import SwitchboardAcceptControlAllOfPayload
from sunshine_conversations_client.model.switchboard_accept_control_failure import SwitchboardAcceptControlFailure
from sunshine_conversations_client.model.switchboard_accept_control_failure_all_of import SwitchboardAcceptControlFailureAllOf
from sunshine_conversations_client.model.switchboard_accept_control_failure_all_of_payload import SwitchboardAcceptControlFailureAllOfPayload
from sunshine_conversations_client.model.switchboard_integration import SwitchboardIntegration
from sunshine_conversations_client.model.switchboard_integration_create_body import SwitchboardIntegrationCreateBody
from sunshine_conversations_client.model.switchboard_integration_list_response import SwitchboardIntegrationListResponse
from sunshine_conversations_client.model.switchboard_integration_response import SwitchboardIntegrationResponse
from sunshine_conversations_client.model.switchboard_integration_update_body import SwitchboardIntegrationUpdateBody
from sunshine_conversations_client.model.switchboard_integration_webhook import SwitchboardIntegrationWebhook
from sunshine_conversations_client.model.switchboard_list_response import SwitchboardListResponse
from sunshine_conversations_client.model.switchboard_offer_control import SwitchboardOfferControl
from sunshine_conversations_client.model.switchboard_offer_control_all_of import SwitchboardOfferControlAllOf
from sunshine_conversations_client.model.switchboard_offer_control_all_of_payload import SwitchboardOfferControlAllOfPayload
from sunshine_conversations_client.model.switchboard_offer_control_failure import SwitchboardOfferControlFailure
from sunshine_conversations_client.model.switchboard_pass_control import SwitchboardPassControl
from sunshine_conversations_client.model.switchboard_pass_control_all_of import SwitchboardPassControlAllOf
from sunshine_conversations_client.model.switchboard_pass_control_all_of_payload import SwitchboardPassControlAllOfPayload
from sunshine_conversations_client.model.switchboard_pass_control_failure import SwitchboardPassControlFailure
from sunshine_conversations_client.model.switchboard_response import SwitchboardResponse
from sunshine_conversations_client.model.switchboard_update_body import SwitchboardUpdateBody
from sunshine_conversations_client.model.target import Target
from sunshine_conversations_client.model.telegram import Telegram
from sunshine_conversations_client.model.telegram_all_of import TelegramAllOf
from sunshine_conversations_client.model.telegram_update import TelegramUpdate
from sunshine_conversations_client.model.template_message import TemplateMessage
from sunshine_conversations_client.model.text_message import TextMessage
from sunshine_conversations_client.model.twilio import Twilio
from sunshine_conversations_client.model.twilio_all_of import TwilioAllOf
from sunshine_conversations_client.model.twilio_update import TwilioUpdate
from sunshine_conversations_client.model.twitter import Twitter
from sunshine_conversations_client.model.twitter_all_of import TwitterAllOf
from sunshine_conversations_client.model.twitter_update import TwitterUpdate
from sunshine_conversations_client.model.user import User
from sunshine_conversations_client.model.user_all_of import UserAllOf
from sunshine_conversations_client.model.user_create_body import UserCreateBody
from sunshine_conversations_client.model.user_merge_event import UserMergeEvent
from sunshine_conversations_client.model.user_merge_event_all_of import UserMergeEventAllOf
from sunshine_conversations_client.model.user_merge_event_all_of_payload import UserMergeEventAllOfPayload
from sunshine_conversations_client.model.user_merge_event_all_of_payload_merged_clients import UserMergeEventAllOfPayloadMergedClients
from sunshine_conversations_client.model.user_merge_event_all_of_payload_merged_conversations import UserMergeEventAllOfPayloadMergedConversations
from sunshine_conversations_client.model.user_merge_event_all_of_payload_merged_users import UserMergeEventAllOfPayloadMergedUsers
from sunshine_conversations_client.model.user_response import UserResponse
from sunshine_conversations_client.model.user_truncated import UserTruncated
from sunshine_conversations_client.model.user_update_body import UserUpdateBody
from sunshine_conversations_client.model.viber import Viber
from sunshine_conversations_client.model.viber_all_of import ViberAllOf
from sunshine_conversations_client.model.viber_update import ViberUpdate
from sunshine_conversations_client.model.web import Web
from sunshine_conversations_client.model.web_all_of import WebAllOf
from sunshine_conversations_client.model.web_update import WebUpdate
from sunshine_conversations_client.model.web_update_all_of import WebUpdateAllOf
from sunshine_conversations_client.model.webhook import Webhook
from sunshine_conversations_client.model.webhook_body import WebhookBody
from sunshine_conversations_client.model.webhook_create_body import WebhookCreateBody
from sunshine_conversations_client.model.webhook_list_response import WebhookListResponse
from sunshine_conversations_client.model.webhook_response import WebhookResponse
from sunshine_conversations_client.model.webhook_sub_schema import WebhookSubSchema
from sunshine_conversations_client.model.webview import Webview
from sunshine_conversations_client.model.whats_app_update import WhatsAppUpdate
from sunshine_conversations_client.model.whats_app_update_all_of import WhatsAppUpdateAllOf
from sunshine_conversations_client.model.whatsapp import Whatsapp
from sunshine_conversations_client.model.whatsapp_all_of import WhatsappAllOf
| 82.525641
| 154
| 0.929121
| 2,926
| 25,748
| 7.780246
| 0.138072
| 0.268438
| 0.318471
| 0.394904
| 0.627323
| 0.557083
| 0.338326
| 0.227103
| 0.130815
| 0.110301
| 0
| 0.000326
| 0.047538
| 25,748
| 311
| 155
| 82.790997
| 0.927948
| 0.008544
| 0
| 0
| 1
| 0
| 0.000196
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.017123
| 0.996575
| 0
| 0.996575
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
72761545ae336a19e68ea01bb8ef61ca83d8229d | 1,282 | py | Python |
demo_pshape.py | sam1902/pshape | b94b474ecd528284307907d85455e6252946fb95 | ["BSD-3-Clause"] | null | null | null |
demo_pshape.py | sam1902/pshape | b94b474ecd528284307907d85455e6252946fb95 | ["BSD-3-Clause"] | null | null | null |
demo_pshape.py | sam1902/pshape | b94b474ecd528284307907d85455e6252946fb95 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python3
"""Module doc"""
from pshape import pshape
import torch
import numpy as np
def main():
"""Main function"""
print(">>> pshape(np.arange(10).reshape(5,2,1), heading=True)")
print()
pshape(np.arange(10).reshape(5,2,1), heading=True)
print()
print(">>> pshape(np.eye(4), np.arange(10).reshape(5,2,1), heading=True)")
print()
pshape(np.eye(4), np.arange(10).reshape(5,2,1), heading=True)
print()
print(">>> cool_arr1 = np.random.rand(123,4,2,1)")
print(">>> cool_arr2 = np.random.rand(123,4,2,2)")
print(">>> cool_arr3 = np.random.rand(123,4,2,3)")
cool_arr1 = np.random.rand(123,4,2,1)
cool_arr2 = np.random.rand(123,4,2,2)
cool_arr3 = np.random.rand(123,4,2,3)
print(">>> pshape(cool_arr1, cool_arr2, cool_arr3)")
print()
pshape(cool_arr1, cool_arr2, cool_arr3)
print()
print(">>> pshape(cool_arr1, np.arange(12).reshape(3,4), cool_arr3)")
print()
pshape(cool_arr1, np.arange(12).reshape(3,4), cool_arr3)
print()
print(">>> pshape(torch.arange(12).view(3,4), np.arange(12).reshape(3,4), cool_arr3)")
print()
pshape(torch.arange(12).view(3,4), np.arange(12).reshape(3,4), cool_arr3, heading=True)
print()
if __name__ == "__main__":
main()
| 29.136364
| 91
| 0.620125
| 210
| 1,282
| 3.661905
| 0.185714
| 0.143043
| 0.093628
| 0.117035
| 0.828349
| 0.828349
| 0.828349
| 0.828349
| 0.828349
| 0.527958
| 0
| 0.094393
| 0.165367
| 1,282
| 43
| 92
| 29.813953
| 0.624299
| 0.035881
| 0
| 0.3125
| 0
| 0.09375
| 0.351594
| 0.186427
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.09375
| 0
| 0.125
| 0.5625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
72976487139c1c744f4018393d1580ee68ac1e88 | 16,745 | py | Python |
mayan/apps/user_management/tests/test_api.py | Fourdee/mayan-edms | 39a94f8b4fed519a3b20ab419e920ea53c11eb84 | ["Apache-2.0"] | 4 | 2019-02-17T08:35:42.000Z | 2019-03-28T06:02:11.000Z |
mayan/apps/user_management/tests/test_api.py | zhoubear/mayan-edms | e9bc10a056c3379b57115c6e83022f48c6298e1d | ["Apache-2.0"] | 1 | 2018-10-11T13:01:34.000Z | 2018-10-11T13:01:34.000Z |
mayan/apps/user_management/tests/test_api.py | prezi/mayan-edms | e9bc10a056c3379b57115c6e83022f48c6298e1d | ["Apache-2.0"] | 3 | 2019-01-29T13:21:57.000Z | 2019-10-27T03:20:15.000Z |
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from rest_framework import status
from rest_api.tests import BaseAPITestCase
from ..permissions import (
permission_group_create, permission_group_delete,
permission_group_edit, permission_group_view,
permission_user_create, permission_user_delete,
permission_user_edit, permission_user_view
)
from .literals import (
TEST_GROUP_2_NAME, TEST_GROUP_2_NAME_EDITED, TEST_USER_2_EMAIL,
TEST_USER_2_PASSWORD, TEST_USER_2_USERNAME, TEST_USER_2_USERNAME_EDITED,
TEST_USER_2_PASSWORD_EDITED
)
class UserManagementUserAPITestCase(BaseAPITestCase):
def setUp(self):
super(UserManagementUserAPITestCase, self).setUp()
self.login_user()
# User create
def _create_user(self):
return get_user_model().objects.create_user(
username=TEST_USER_2_USERNAME, email=TEST_USER_2_EMAIL,
password=TEST_USER_2_PASSWORD
)
def _request_user_create(self):
return self.post(
viewname='rest_api:user-list', data={
'email': TEST_USER_2_EMAIL, 'password': TEST_USER_2_PASSWORD,
'username': TEST_USER_2_USERNAME,
}
)
def test_user_create_no_permission(self):
response = self._request_user_create()
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# Default two users, the test admin and the test user
self.assertEqual(get_user_model().objects.count(), 2)
def test_user_create_with_permission(self):
self.grant_permission(permission=permission_user_create)
response = self._request_user_create()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(pk=response.data['id'])
self.assertEqual(user.username, TEST_USER_2_USERNAME)
self.assertEqual(get_user_model().objects.count(), 3)
def _request_create_user_with_extra_data(self):
return self.post(
viewname='rest_api:user-list', data={
'email': TEST_USER_2_EMAIL, 'password': TEST_USER_2_PASSWORD,
'username': TEST_USER_2_USERNAME,
'groups_pk_list': self.groups_pk_list
}
)
def test_user_create_with_group_no_permission(self):
group_1 = Group.objects.create(name=TEST_GROUP_2_NAME)
self.groups_pk_list = '{}'.format(group_1.pk)
response = self._request_create_user_with_extra_data()
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_user_create_with_group_with_permission(self):
group_1 = Group.objects.create(name=TEST_GROUP_2_NAME)
self.groups_pk_list = '{}'.format(group_1.pk)
self.grant_permission(permission=permission_user_create)
response = self._request_create_user_with_extra_data()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(pk=response.data['id'])
self.assertEqual(user.username, TEST_USER_2_USERNAME)
self.assertQuerysetEqual(user.groups.all(), (repr(group_1),))
def test_user_create_with_groups_no_permission(self):
group_1 = Group.objects.create(name='test group 1')
group_2 = Group.objects.create(name='test group 2')
self.groups_pk_list = '{},{}'.format(group_1.pk, group_2.pk)
response = self._request_create_user_with_extra_data()
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_user_create_with_groups_with_permission(self):
group_1 = Group.objects.create(name='test group 1')
group_2 = Group.objects.create(name='test group 2')
self.groups_pk_list = '{},{}'.format(group_1.pk, group_2.pk)
self.grant_permission(permission=permission_user_create)
response = self._request_create_user_with_extra_data()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(pk=response.data['id'])
self.assertEqual(user.username, TEST_USER_2_USERNAME)
self.assertQuerysetEqual(
user.groups.all().order_by('name'), (repr(group_1), repr(group_2))
)
# User login
def test_user_create_login(self):
self._create_user()
self.assertTrue(
self.login(
username=TEST_USER_2_USERNAME, password=TEST_USER_2_PASSWORD
)
)
# User password change
def test_user_create_login_password_change_no_access(self):
user = self._create_user()
self.patch(
viewname='rest_api:user-detail', args=(user.pk,), data={
'password': TEST_USER_2_PASSWORD_EDITED,
}
)
self.assertFalse(
self.client.login(
username=TEST_USER_2_USERNAME, password=TEST_USER_2_PASSWORD_EDITED
)
)
def test_user_create_login_password_change_with_access(self):
user = self._create_user()
self.grant_access(permission=permission_user_edit, obj=user)
self.patch(
viewname='rest_api:user-detail', args=(user.pk,), data={
'password': TEST_USER_2_PASSWORD_EDITED,
}
)
self.assertTrue(
self.client.login(
username=TEST_USER_2_USERNAME, password=TEST_USER_2_PASSWORD_EDITED
)
)
# User edit
def _request_user_edit_via_put(self, user):
return self.put(
viewname='rest_api:user-detail', args=(user.pk,),
data={'username': TEST_USER_2_USERNAME_EDITED}
)
def test_user_edit_via_put_no_access(self):
user = self._create_user()
response = self._request_user_edit_via_put(user=user)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
user.refresh_from_db()
self.assertEqual(user.username, TEST_USER_2_USERNAME)
def test_user_edit_via_put_with_access(self):
user = self._create_user()
self.grant_access(permission=permission_user_edit, obj=user)
response = self._request_user_edit_via_put(user=user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertEqual(user.username, TEST_USER_2_USERNAME_EDITED)
def _request_user_edit_via_patch(self, user):
return self.patch(
viewname='rest_api:user-detail', args=(user.pk,),
data={'username': TEST_USER_2_USERNAME_EDITED}
)
def test_user_edit_via_patch_no_access(self):
user = self._create_user()
response = self._request_user_edit_via_patch(user=user)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
user.refresh_from_db()
self.assertEqual(user.username, TEST_USER_2_USERNAME)
def test_user_edit_via_patch_with_access(self):
user = self._create_user()
self.grant_access(permission=permission_user_edit, obj=user)
response = self._request_user_edit_via_patch(user=user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertEqual(user.username, TEST_USER_2_USERNAME_EDITED)
def _request_user_edit_via_patch_with_extra_data(self, user, group):
return self.patch(
viewname='rest_api:user-detail', args=(user.pk,),
data={'groups_pk_list': '{}'.format(group.pk)}
)
def test_user_edit_add_groups_via_patch_no_access(self):
group = Group.objects.create(name=TEST_GROUP_2_NAME)
user = self._create_user()
response = self._request_user_edit_via_patch_with_extra_data(
user=user, group=group
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
user.refresh_from_db()
self.assertEqual(user.username, TEST_USER_2_USERNAME)
self.assertQuerysetEqual(
user.groups.all(), ()
)
def test_user_edit_add_groups_via_patch_with_access(self):
group = Group.objects.create(name=TEST_GROUP_2_NAME)
user = self._create_user()
self.grant_access(permission=permission_user_edit, obj=user)
response = self._request_user_edit_via_patch_with_extra_data(
user=user, group=group
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user.refresh_from_db()
self.assertEqual(user.username, TEST_USER_2_USERNAME)
self.assertQuerysetEqual(
user.groups.all(), (repr(group),)
)
# User delete
def _request_user_delete(self, user):
return self.delete(
viewname='rest_api:user-detail', args=(user.pk,)
)
def test_user_delete_no_access(self):
user = self._create_user()
response = self._request_user_delete(user=user)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertTrue(get_user_model().objects.filter(pk=user.pk).exists())
def test_user_delete_with_access(self):
user = self._create_user()
self.grant_access(permission=permission_user_delete, obj=user)
response = self._request_user_delete(user=user)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertFalse(get_user_model().objects.filter(pk=user.pk).exists())
# User view
def _request_user_group_view(self, user):
return self.get(
viewname='rest_api:users-group-list', args=(user.pk,)
)
def test_user_group_list_no_access(self):
group = Group.objects.create(name=TEST_GROUP_2_NAME)
user = self._create_user()
user.groups.add(group)
response = self._request_user_group_view(user=user)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_user_group_list_with_user_access(self):
group = Group.objects.create(name=TEST_GROUP_2_NAME)
user = self._create_user()
user.groups.add(group)
self.grant_access(permission=permission_user_view, obj=user)
response = self._request_user_group_view(user=user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_user_group_list_with_group_access(self):
group = Group.objects.create(name=TEST_GROUP_2_NAME)
user = self._create_user()
user.groups.add(group)
self.grant_access(permission=permission_group_view, obj=group)
response = self._request_user_group_view(user=user)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_user_group_list_with_access(self):
group = Group.objects.create(name=TEST_GROUP_2_NAME)
user = self._create_user()
user.groups.add(group)
self.grant_access(permission=permission_user_view, obj=user)
self.grant_access(permission=permission_group_view, obj=group)
response = self._request_user_group_view(user=user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def _request_user_group_add(self, user, group):
return self.post(
viewname='rest_api:users-group-list', args=(user.pk,), data={
'group_pk_list': '{}'.format(group.pk)
}
)
def test_user_group_add_no_access(self):
group = Group.objects.create(name=TEST_GROUP_2_NAME)
user = self._create_user()
response = self._request_user_group_add(user=user, group=group)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
user.refresh_from_db()
self.assertEqual(group.user_set.first(), None)
def test_user_group_add_with_user_access(self):
group = Group.objects.create(name=TEST_GROUP_2_NAME)
user = self._create_user()
self.grant_access(permission=permission_user_edit, obj=user)
response = self._request_user_group_add(user=user, group=group)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # FIXME: Should this endpoint return a 201 or a 200? The user is only
        # being edited here and no new resource is created.
user.refresh_from_db()
self.assertEqual(group.user_set.first(), None)
def test_user_group_add_with_group_access(self):
group = Group.objects.create(name=TEST_GROUP_2_NAME)
user = self._create_user()
self.grant_access(permission=permission_group_view, obj=group)
response = self._request_user_group_add(user=user, group=group)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        # FIXME: Should this endpoint return a 201 or a 200? The user is only
        # being edited here and no new resource is created.
user.refresh_from_db()
self.assertEqual(group.user_set.first(), None)
def test_user_group_add_with_access(self):
group = Group.objects.create(name=TEST_GROUP_2_NAME)
user = self._create_user()
self.grant_access(permission=permission_user_edit, obj=user)
self.grant_access(permission=permission_group_view, obj=group)
response = self._request_user_group_add(user=user, group=group)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # FIXME: Should this endpoint return a 201 or a 200? The user is only
        # being edited here and no new resource is created.
user.refresh_from_db()
self.assertEqual(group.user_set.first(), user)
class UserManagementGroupAPITestCase(BaseAPITestCase):
def setUp(self):
super(UserManagementGroupAPITestCase, self).setUp()
self.login_user()
def _request_group_create(self):
return self.post(
viewname='rest_api:group-list', data={
'name': TEST_GROUP_2_NAME
}
)
def test_group_create_no_permission(self):
response = self._request_group_create()
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertFalse(
TEST_GROUP_2_NAME in list(Group.objects.values_list('name', flat=True))
)
def test_group_create_with_permission(self):
self.grant_permission(permission=permission_group_create)
response = self._request_group_create()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(
TEST_GROUP_2_NAME in list(Group.objects.values_list('name', flat=True))
)
def _request_group_edit_via_patch(self):
return self.patch(
viewname='rest_api:group-detail', args=(self.group.pk,),
data={
'name': TEST_GROUP_2_NAME_EDITED
}
)
def test_group_edit_via_patch_no_access(self):
self.group = Group.objects.create(name=TEST_GROUP_2_NAME)
response = self._request_group_edit_via_patch()
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.group.refresh_from_db()
self.assertEqual(self.group.name, TEST_GROUP_2_NAME)
def test_group_edit_via_patch_with_access(self):
self.group = Group.objects.create(name=TEST_GROUP_2_NAME)
self.grant_access(permission=permission_group_edit, obj=self.group)
response = self._request_group_edit_via_patch()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.group.refresh_from_db()
self.assertEqual(self.group.name, TEST_GROUP_2_NAME_EDITED)
def _request_group_delete(self):
return self.delete(
viewname='rest_api:group-detail', args=(self.group.pk,)
)
def test_group_delete_no_access(self):
self.group = Group.objects.create(name=TEST_GROUP_2_NAME)
response = self._request_group_delete()
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertTrue(
TEST_GROUP_2_NAME in list(Group.objects.values_list('name', flat=True))
)
def test_group_delete_with_access(self):
self.group = Group.objects.create(name=TEST_GROUP_2_NAME)
self.grant_access(permission=permission_group_delete, obj=self.group)
response = self._request_group_delete()
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertFalse(
TEST_GROUP_2_NAME in list(Group.objects.values_list('name', flat=True))
)
| 38.318078
| 83
| 0.690475
| 2,158
| 16,745
| 4.969416
| 0.055144
| 0.044013
| 0.027695
| 0.075718
| 0.898079
| 0.875979
| 0.844368
| 0.815834
| 0.791682
| 0.761376
| 0
| 0.014019
| 0.216184
| 16,745
| 436
| 84
| 38.405963
| 0.803048
| 0.030337
| 0
| 0.587879
| 0
| 0
| 0.030333
| 0.005672
| 0
| 0
| 0
| 0.002294
| 0.181818
| 1
| 0.136364
| false
| 0.036364
| 0.021212
| 0.036364
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
729aec100312ae793527a7e94b62e9476eb868b9
| 113
|
py
|
Python
|
bodyhands/__init__.py
|
cvlab-stonybrook/BodyHands
|
dcfe470f6fd31a048d4d17d4ae9a2a524538b380
|
[
"MIT"
] | 1
|
2022-03-06T08:18:33.000Z
|
2022-03-06T08:18:33.000Z
|
bodyhands/__init__.py
|
cvlab-stonybrook/BodyHands
|
dcfe470f6fd31a048d4d17d4ae9a2a524538b380
|
[
"MIT"
] | null | null | null |
bodyhands/__init__.py
|
cvlab-stonybrook/BodyHands
|
dcfe470f6fd31a048d4d17d4ae9a2a524538b380
|
[
"MIT"
] | null | null | null |
from .config import *
from .data import *
from .evaluation import *
from .modeling import *
from .utils import *
| 18.833333
| 25
| 0.734513
| 15
| 113
| 5.533333
| 0.466667
| 0.481928
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176991
| 113
| 5
| 26
| 22.6
| 0.892473
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f42cb37aa2d3c697c189931afe7aa988e5cfa79d
| 293
|
py
|
Python
|
pkg_radish_selenium/radish_selenium/__init__.py
|
bbielicki/radish-bdd-extensions
|
7f1317461af23a70f2a551b66299b54e296af32f
|
[
"BSD-3-Clause"
] | 4
|
2019-09-19T21:25:26.000Z
|
2019-11-10T06:09:06.000Z
|
pkg_radish_selenium/radish_selenium/__init__.py
|
bbielicki/radish-bdd-extensions
|
7f1317461af23a70f2a551b66299b54e296af32f
|
[
"BSD-3-Clause"
] | null | null | null |
pkg_radish_selenium/radish_selenium/__init__.py
|
bbielicki/radish-bdd-extensions
|
7f1317461af23a70f2a551b66299b54e296af32f
|
[
"BSD-3-Clause"
] | 2
|
2019-09-17T11:26:59.000Z
|
2020-01-23T20:20:43.000Z
|
# © 2019 Nokia
# Licensed under the BSD 3 Clause license
# SPDX-License-Identifier: BSD-3-Clause
import os
def get_radish_selenium_dir():
return os.path.abspath(os.path.dirname(__file__))
def get_radish_selenium_etc_dir():
return os.path.join(get_radish_selenium_dir(), 'etc')
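# --- Illustrative usage sketch (added; not part of the original Nokia module) ---
# Shows how the two helpers above could be combined to locate a file shipped in the
# package's 'etc' directory. The file name 'selenium_config.cfg' is hypothetical.
if __name__ == '__main__':
    config_path = os.path.join(get_radish_selenium_etc_dir(), 'selenium_config.cfg')
    print('etc dir:', get_radish_selenium_etc_dir())
    print('example config path:', config_path)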
| 17.235294
| 57
| 0.74744
| 46
| 293
| 4.478261
| 0.565217
| 0.131068
| 0.247573
| 0.194175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023904
| 0.143345
| 293
| 16
| 58
| 18.3125
| 0.792829
| 0.307167
| 0
| 0
| 0
| 0
| 0.015228
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| true
| 0
| 0.2
| 0.4
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
f439440399888de027a2324c8c89546e0f24d846
| 7,422
|
py
|
Python
|
povary/apps/events/migrations/0006_auto__add_field_event_visits_num__add_field_eventcategory_visits_num.py
|
TorinAsakura/cooking
|
cf0c78f613fa9ce0fcd4ec7a397ab880d9dd631a
|
[
"BSD-3-Clause"
] | null | null | null |
povary/apps/events/migrations/0006_auto__add_field_event_visits_num__add_field_eventcategory_visits_num.py
|
TorinAsakura/cooking
|
cf0c78f613fa9ce0fcd4ec7a397ab880d9dd631a
|
[
"BSD-3-Clause"
] | null | null | null |
povary/apps/events/migrations/0006_auto__add_field_event_visits_num__add_field_eventcategory_visits_num.py
|
TorinAsakura/cooking
|
cf0c78f613fa9ce0fcd4ec7a397ab880d9dd631a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Event.visits_num'
db.add_column('events_event', 'visits_num',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'EventCategory.visits_num'
db.add_column('events_eventcategory', 'visits_num',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Event.visits_num'
db.delete_column('events_event', 'visits_num')
# Deleting field 'EventCategory.visits_num'
db.delete_column('events_eventcategory', 'visits_num')
models = {
'cities_light.city': {
'Meta': {'unique_together': "(('region', 'name'),)", 'object_name': 'City'},
'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.Country']"}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.Region']", 'null': 'True', 'blank': 'True'}),
'search_names': ('cities_light.models.ToSearchTextField', [], {'default': "''", 'max_length': '4000', 'db_index': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'})
},
'cities_light.country': {
'Meta': {'object_name': 'Country'},
'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'code2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'code3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'continent': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'}),
'tld': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '5', 'blank': 'True'})
},
'cities_light.region': {
'Meta': {'unique_together': "(('country', 'name'),)", 'object_name': 'Region'},
'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.Country']"}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'geoname_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'})
},
'events.event': {
'Meta': {'object_name': 'Event'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events'", 'to': "orm['events.EventCategory']"}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.City']", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.Country']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visits_num': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'events.eventcategory': {
'Meta': {'object_name': 'EventCategory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visits_num': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
}
}
complete_apps = ['events']
| 75.734694
| 152
| 0.560765
| 764
| 7,422
| 5.311518
| 0.143979
| 0.100542
| 0.172499
| 0.246427
| 0.810251
| 0.783144
| 0.715377
| 0.706013
| 0.686792
| 0.66412
| 0
| 0.01034
| 0.192132
| 7,422
| 98
| 153
| 75.734694
| 0.666444
| 0.02277
| 0
| 0.423529
| 0
| 0
| 0.555326
| 0.286286
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023529
| false
| 0
| 0.047059
| 0
| 0.105882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f4399ebf5dd63b964e0c1dae5cd95ec230b4ffd9
| 420
|
py
|
Python
|
src/modules/priors/prior.py
|
Constantin771/Improved_srVAE
|
c1dc8662a077fa8b43ed14d77d5491f50faed79f
|
[
"MIT"
] | 60
|
2020-06-11T11:06:15.000Z
|
2022-03-31T14:35:19.000Z
|
src/modules/priors/prior.py
|
Constantin771/Improved_srVAE
|
c1dc8662a077fa8b43ed14d77d5491f50faed79f
|
[
"MIT"
] | 9
|
2020-06-28T09:45:28.000Z
|
2020-12-30T15:20:19.000Z
|
src/modules/priors/prior.py
|
Constantin771/Improved_srVAE
|
c1dc8662a077fa8b43ed14d77d5491f50faed79f
|
[
"MIT"
] | 9
|
2020-07-28T12:03:32.000Z
|
2022-03-31T14:34:08.000Z
|
import torch
import torch.nn as nn
class Prior(nn.Module):
def __init__(self):
super().__init__()
def sample(self, **kwargs):
raise NotImplementedError
    def log_p(self, input, **kwargs):
        # The original returned self.forward(z) with `z` undefined; delegate to forward() on the given input.
        return self.forward(input, **kwargs)
def forward(self, input, **kwargs):
raise NotImplementedError
def __str__(self):
raise NotImplementedError
if __name__ == "__main__":
pass
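# --- Illustrative sketch (added; not part of the original file) ---
# A minimal concrete subclass, assuming a standard Normal prior, to show how the
# abstract interface above is meant to be filled in. The class name, the 'dim'
# argument, and the log-density formula are assumptions for illustration only.
import math

class StandardNormalPrior(Prior):
    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def sample(self, n_samples=1, **kwargs):
        # Draw n_samples vectors from N(0, I).
        return torch.randn(n_samples, self.dim)

    def forward(self, input, **kwargs):
        # log N(input; 0, I), summed over the last dimension.
        return (-0.5 * (input ** 2 + math.log(2 * math.pi))).sum(dim=-1)

    def __str__(self):
        return "StandardNormalPrior"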
| 17.5
| 39
| 0.635714
| 48
| 420
| 5.125
| 0.541667
| 0.292683
| 0.243902
| 0.268293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.259524
| 420
| 23
| 40
| 18.26087
| 0.790997
| 0
| 0
| 0.2
| 0
| 0
| 0.019048
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.066667
| 0.133333
| 0.066667
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
f48002125b3db6047ffea0bb08eec9a4ef22c59d
| 36
|
py
|
Python
|
pose_tracking_3d/__init__.py
|
WildflowerSchools/wf-3d-pose-tracking
|
0ba099e3d573ecbc39773617c540360668fcee9a
|
[
"MIT"
] | 1
|
2019-12-06T21:15:36.000Z
|
2019-12-06T21:15:36.000Z
|
pose_tracking_3d/__init__.py
|
WildflowerSchools/wf-3d-pose-tracking
|
0ba099e3d573ecbc39773617c540360668fcee9a
|
[
"MIT"
] | 1
|
2019-12-15T23:49:06.000Z
|
2019-12-16T20:20:52.000Z
|
pose_tracking_3d/__init__.py
|
WildflowerSchools/wf-3d-pose-tracking
|
0ba099e3d573ecbc39773617c540360668fcee9a
|
[
"MIT"
] | 2
|
2019-12-06T19:46:07.000Z
|
2019-12-11T22:38:15.000Z
|
from pose_tracking_3d.core import *
| 18
| 35
| 0.833333
| 6
| 36
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0.111111
| 36
| 1
| 36
| 36
| 0.84375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
be797d25bdb54a612514ab0799e16c4b21917fe2
| 167
|
py
|
Python
|
cpab/gpu/__init__.py
|
freifeld/cpabDiffeo
|
22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6
|
[
"MIT"
] | 17
|
2016-03-16T21:35:36.000Z
|
2021-11-11T04:16:21.000Z
|
cpab/gpu/__init__.py
|
freifeld/cpabDiffeo
|
22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6
|
[
"MIT"
] | null | null | null |
cpab/gpu/__init__.py
|
freifeld/cpabDiffeo
|
22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6
|
[
"MIT"
] | 4
|
2016-08-12T23:02:09.000Z
|
2019-03-14T18:20:36.000Z
|
import os
dirname_of_cuda_files = os.path.abspath(os.path.dirname(str(__file__)))
if __name__ == "__main__":
    print('dirname_of_cuda_files:', dirname_of_cuda_files)
| 27.833333
| 71
| 0.784431
| 26
| 167
| 4.230769
| 0.538462
| 0.245455
| 0.354545
| 0.490909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095808
| 167
| 6
| 72
| 27.833333
| 0.728477
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 0.130952
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.25
| null | null | 0.25
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
be9c67abdf4450516929b46cb9d0a2bd1cd7a8ff
| 166
|
py
|
Python
|
misc/pyschemes_fun.py
|
diegopacheco/python-playground
|
8e6ba427df6922fb578c2328babbf3466687ccbf
|
[
"Unlicense"
] | null | null | null |
misc/pyschemes_fun.py
|
diegopacheco/python-playground
|
8e6ba427df6922fb578c2328babbf3466687ccbf
|
[
"Unlicense"
] | null | null | null |
misc/pyschemes_fun.py
|
diegopacheco/python-playground
|
8e6ba427df6922fb578c2328babbf3466687ccbf
|
[
"Unlicense"
] | null | null | null |
from pyschemes import Scheme, validators
from collections.abc import Iterable  # moved to collections.abc in Python 3.3; removed from collections in 3.10
print( str( Scheme(int).validate(10) ))
print( str( Scheme(Iterable).validate([1, 2]) ))
| 27.666667
| 49
| 0.740964
| 22
| 166
| 5.590909
| 0.636364
| 0.130081
| 0.227642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027586
| 0.126506
| 166
| 5
| 50
| 33.2
| 0.82069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
fe57f8c8c300823790f15c1c2940a65808751edc
| 1,380
|
py
|
Python
|
tests/test_provider_invidian_libvirt.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
tests/test_provider_invidian_libvirt.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
tests/test_provider_invidian_libvirt.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# tests/test_provider_invidian_libvirt.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:21:04 UTC)
def test_provider_import():
import terrascript.provider.invidian.libvirt
def test_resource_import():
from terrascript.resource.invidian.libvirt import libvirt_cloudinit_disk
from terrascript.resource.invidian.libvirt import libvirt_domain
from terrascript.resource.invidian.libvirt import libvirt_ignition
from terrascript.resource.invidian.libvirt import libvirt_network
from terrascript.resource.invidian.libvirt import libvirt_pool
from terrascript.resource.invidian.libvirt import libvirt_volume
def test_datasource_import():
from terrascript.data.invidian.libvirt import libvirt_network_dns_host_template
from terrascript.data.invidian.libvirt import libvirt_network_dns_srv_template
from terrascript.data.invidian.libvirt import (
libvirt_network_dnsmasq_options_template,
)
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.invidian.libvirt
#
# t = terrascript.provider.invidian.libvirt.libvirt()
# s = str(t)
#
# assert 'https://github.com/invidian/terraform-provider-libvirt' in s
# assert '0.6.10-rc1' in s
| 30.666667
| 83
| 0.784058
| 174
| 1,380
| 6.04023
| 0.413793
| 0.185538
| 0.179829
| 0.239772
| 0.549001
| 0.466223
| 0.466223
| 0.175071
| 0.175071
| 0
| 0
| 0.014382
| 0.143478
| 1,380
| 44
| 84
| 31.363636
| 0.874788
| 0.365217
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 0
| 1
| 0.2
| true
| 0
| 0.866667
| 0
| 1.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fe7dd5a7728664c1b1e9772fa5df5fda1c08e621
| 69
|
py
|
Python
|
CAIL2020/cocr/torchocr/networks/heads/__init__.py
|
ShenDezhou/CAIL
|
c4cfa98ab4ecedbce34a7a5a186830486047540c
|
[
"Apache-2.0"
] | 71
|
2020-07-16T01:49:27.000Z
|
2022-03-27T16:55:00.000Z
|
CAIL2020/cocr/torchocr/networks/heads/__init__.py
|
ShenDezhou/CAIL
|
c4cfa98ab4ecedbce34a7a5a186830486047540c
|
[
"Apache-2.0"
] | 11
|
2020-09-18T14:26:25.000Z
|
2022-02-09T23:49:33.000Z
|
CAIL2020/cocr/torchocr/networks/necks/__init__.py
|
ShenDezhou/CAIL
|
c4cfa98ab4ecedbce34a7a5a186830486047540c
|
[
"Apache-2.0"
] | 16
|
2020-07-15T07:24:30.000Z
|
2022-03-19T05:41:11.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2020/5/15 17:42
# @Author : THU
| 23
| 28
| 0.492754
| 11
| 69
| 3.090909
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 0.246377
| 69
| 3
| 29
| 23
| 0.423077
| 0.913043
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fe91f139723675c70a0d84c29b7f358dc9272a66
| 1,432
|
py
|
Python
|
tests/datastructures/trees/test_serialize_tree.py
|
sikakente/educative-io-python
|
be6e6c3534bf76e6f77addce16d1ab0c40e3e48d
|
[
"MIT"
] | 1
|
2021-12-28T21:19:53.000Z
|
2021-12-28T21:19:53.000Z
|
tests/datastructures/trees/test_serialize_tree.py
|
sikakente/educative-io-python
|
be6e6c3534bf76e6f77addce16d1ab0c40e3e48d
|
[
"MIT"
] | 72
|
2022-02-01T18:18:47.000Z
|
2022-03-13T12:31:26.000Z
|
tests/datastructures/trees/test_serialize_tree.py
|
sikakente/educative-io-python
|
be6e6c3534bf76e6f77addce16d1ab0c40e3e48d
|
[
"MIT"
] | null | null | null |
import unittest
import pytest
from datastructures.trees.binary_tree_utils import BinaryTree, NULL_MARKER
from datastructures.trees.serialize_tree import Codec, Codec2
@pytest.mark.parametrize("values", [
([2, 1, 3]),
([]),
([1, 2]),
([1, NULL_MARKER, 2]),
([5, 3, 6, 2, 4, NULL_MARKER, NULL_MARKER, 1])
])
def test_serialize_deserialize_tree(values):
tree = BinaryTree().build_tree(values)
serializer = Codec()
deserializer = Codec()
serialized_tree = serializer.serialize(tree.root)
deserialized_tree = deserializer.deserialize(serialized_tree)
BinaryTree().preorder(tree.root, [])
BinaryTree().preorder(deserialized_tree, [])
assert BinaryTree().preorder(tree.root, []) == BinaryTree().preorder(deserialized_tree, [])
@pytest.mark.parametrize("values", [
([2, 1, 3]),
([]),
([1, 2]),
([1, NULL_MARKER, 2]),
([5, 3, 6, 2, 4, NULL_MARKER, NULL_MARKER, 1])
])
def test_serialize_deserialize_tree_codec2(values):
    # Renamed from the duplicate test_serialize_deserialize_tree above so pytest collects both tests.
tree = BinaryTree().build_tree(values)
serializer = Codec2()
deserializer = Codec2()
serialized_tree = serializer.serialize(tree.root)
deserialized_tree = deserializer.deserialize(serialized_tree)
BinaryTree().preorder(tree.root, [])
BinaryTree().preorder(deserialized_tree, [])
assert BinaryTree().preorder(tree.root, []) == BinaryTree().preorder(deserialized_tree, [])
if __name__ == '__main__':
unittest.main()
| 31.822222
| 95
| 0.683659
| 161
| 1,432
| 5.857143
| 0.236025
| 0.152704
| 0.093319
| 0.110286
| 0.776246
| 0.776246
| 0.776246
| 0.776246
| 0.776246
| 0.776246
| 0
| 0.024066
| 0.15852
| 1,432
| 44
| 96
| 32.545455
| 0.758506
| 0
| 0
| 0.736842
| 0
| 0
| 0.013966
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 1
| 0.052632
| false
| 0
| 0.105263
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
22ab98ecd9c94f3ed445f7ea1e896080f788d740
| 974
|
py
|
Python
|
ShowMhd.py
|
ZvikaDia/LearnGitHub
|
c9f15e01fc795b08440fb30fdf2d0afcfd728fbc
|
[
"BSD-2-Clause"
] | null | null | null |
ShowMhd.py
|
ZvikaDia/LearnGitHub
|
c9f15e01fc795b08440fb30fdf2d0afcfd728fbc
|
[
"BSD-2-Clause"
] | 1
|
2021-08-19T10:20:53.000Z
|
2021-08-19T10:27:35.000Z
|
ShowMhd.py
|
ZvikaDia/LearnGitHub
|
c9f15e01fc795b08440fb30fdf2d0afcfd728fbc
|
[
"BSD-2-Clause"
] | null | null | null |
import SimpleITK as sitk
import matplotlib.pylab as plt
ct_scans = sitk.GetArrayFromImage(sitk.ReadImage(r"D:\DIACARDIO\DATA\Camus\training\patient0002\patient0002_4CH_ES_gt.mhd", sitk.sitkFloat32))
plt.figure()
plt.gray()
plt.imshow(ct_scans[0])
# plt.subplots_adjust(0,0,1,1,0.01,0.01)
# for i in range(ct_scans.shape[0]):
# plt.subplot(5,6,i+1), plt.imshow(ct_scans[i]), plt.axis('off')
# # use plt.savefig(...) here if you want to save the images as .jpg, e.g.,
plt.savefig (r"D:\DeleteMe\20\test_gt.png")
ct_scans = sitk.GetArrayFromImage(sitk.ReadImage(r"D:\DIACARDIO\DATA\Camus\training\patient0002\patient0002_4CH_ES.mhd", sitk.sitkFloat32))
plt.figure()
plt.gray()
plt.imshow(ct_scans[0])
# plt.subplots_adjust(0,0,1,1,0.01,0.01)
# for i in range(ct_scans.shape[0]):
# plt.subplot(5,6,i+1), plt.imshow(ct_scans[i]), plt.axis('off')
# # use plt.savefig(...) here if you want to save the images as .jpg, e.g.,
plt.savefig (r"D:\DeleteMe\20\test.png")
| 46.380952
| 142
| 0.717659
| 179
| 974
| 3.815642
| 0.324022
| 0.081991
| 0.064422
| 0.093704
| 0.916545
| 0.916545
| 0.916545
| 0.916545
| 0.916545
| 0.916545
| 0
| 0.063636
| 0.096509
| 974
| 21
| 143
| 46.380952
| 0.7125
| 0.446612
| 0
| 0.5
| 0
| 0
| 0.351607
| 0.351607
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
22d682bd051b7e91419cbfd52b1e75f59c3df840
| 776
|
py
|
Python
|
steam/client/builtins/__init__.py
|
oczkers/steam-httpx
|
77c4c63a5b9937839f62acac01f8e07ca765f20c
|
[
"MIT"
] | 1
|
2019-10-01T11:33:44.000Z
|
2019-10-01T11:33:44.000Z
|
steam/client/builtins/__init__.py
|
wynick27/steam
|
deb1390f319d91d0fec14c32514f1a5f95d4647b
|
[
"MIT"
] | null | null | null |
steam/client/builtins/__init__.py
|
wynick27/steam
|
deb1390f319d91d0fec14c32514f1a5f95d4647b
|
[
"MIT"
] | null | null | null |
"""
All high level features of :class:`steam.client.SteamClient` are implemented here in separate submodules.
"""
from steam.client.builtins.account import Account
from steam.client.builtins.user import User
from steam.client.builtins.web import Web
from steam.client.builtins.unified_messages import UnifiedMessages
from steam.client.builtins.leaderboards import Leaderboards
from steam.client.builtins.gameservers import GameServers
from steam.client.builtins.friends import Friends
from steam.client.builtins.apps import Apps
class BuiltinBase(GameServers, UnifiedMessages, Account, User, Web, Leaderboards, Friends, Apps):
"""
    This object is used as a base to implement all high-level functionality.
The features are separated into submodules.
"""
pass
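# --- Illustrative sketch (added; not part of the original module) ---
# Mirrors the mixin pattern described in the docstrings above: a client class simply
# inherits BuiltinBase to pick up every high-level feature submodule at once. The
# ExampleClient name and its stub connect() method are assumptions for illustration.
class ExampleClient(BuiltinBase):
    """Hypothetical client composing all built-in feature mixins."""

    def connect(self):
        # Real connection handling lives in the actual SteamClient; this is only a stub.
        return True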
| 40.842105
| 105
| 0.804124
| 101
| 776
| 6.168317
| 0.415842
| 0.158909
| 0.192616
| 0.295345
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 776
| 18
| 106
| 43.111111
| 0.917526
| 0.283505
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.1
| 0.8
| 0
| 0.9
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
a3db5a9ea9a2244850491175f9b783b611e2df44
| 156
|
py
|
Python
|
socket_programming/using_telnet.py
|
disooqi/learn-with-corey-schafer
|
d76b4e30190ed4d514455ddbb07cae96e6ed4d2e
|
[
"MIT"
] | 2
|
2018-03-06T22:27:11.000Z
|
2020-10-04T06:14:27.000Z
|
socket_programming/using_telnet.py
|
disooqi/learn-with-corey-schafer
|
d76b4e30190ed4d514455ddbb07cae96e6ed4d2e
|
[
"MIT"
] | null | null | null |
socket_programming/using_telnet.py
|
disooqi/learn-with-corey-schafer
|
d76b4e30190ed4d514455ddbb07cae96e6ed4d2e
|
[
"MIT"
] | 1
|
2018-03-06T22:27:14.000Z
|
2018-03-06T22:27:14.000Z
|
import getpass
import telnetlib
from telnetlib import Telnet
with Telnet('localhost', 9600) as tn:
tn.write('disooqi'.encode('ascii'))
# tn.interact()
| 19.5
| 39
| 0.737179
| 21
| 156
| 5.47619
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02963
| 0.134615
| 156
| 7
| 40
| 22.285714
| 0.822222
| 0.083333
| 0
| 0
| 0
| 0
| 0.148936
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
a3e6c41c13851cd73965d5c7209ec44eabb4392e
| 44
|
py
|
Python
|
mne_qt_browser/figure.py
|
marsipu/mne-qt-browser
|
6b67dd5af1ef0e39590ffacfbd4dffd8bd7d273d
|
[
"BSD-3-Clause"
] | 1
|
2021-11-01T08:59:15.000Z
|
2021-11-01T08:59:15.000Z
|
mne_qt_browser/figure.py
|
mscheltienne/mne-qt-browser
|
6c2431632c577fb41f2c1fa25dfbe4e19205da69
|
[
"BSD-3-Clause"
] | null | null | null |
mne_qt_browser/figure.py
|
mscheltienne/mne-qt-browser
|
6c2431632c577fb41f2c1fa25dfbe4e19205da69
|
[
"BSD-3-Clause"
] | null | null | null |
from ._pg_figure import MNEQtBrowser # noqa
| 44
| 44
| 0.818182
| 6
| 44
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 44
| 1
| 44
| 44
| 0.894737
| 0.090909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
431a3b3236d9250faac854d90248e62b7cf16b52
| 101
|
py
|
Python
|
accounts/filters.py
|
yamansener199/CS308-Project
|
11b915c891494278db73dede565554704cbc8ae2
|
[
"Apache-2.0"
] | 1
|
2021-11-13T11:35:40.000Z
|
2021-11-13T11:35:40.000Z
|
accounts/filters.py
|
yamansener199/CS308-Project
|
11b915c891494278db73dede565554704cbc8ae2
|
[
"Apache-2.0"
] | null | null | null |
accounts/filters.py
|
yamansener199/CS308-Project
|
11b915c891494278db73dede565554704cbc8ae2
|
[
"Apache-2.0"
] | 2
|
2021-11-11T14:22:38.000Z
|
2021-11-13T11:35:42.000Z
|
import django_filters
from django_filters import DateFilter, CharFilter
from .models import *
| 16.833333
| 50
| 0.792079
| 12
| 101
| 6.5
| 0.583333
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178218
| 101
| 5
| 51
| 20.2
| 0.939759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4a375f3eef82646da3adb5a7927be5b831428df3
| 261
|
py
|
Python
|
coveo-functools/coveo_functools/flex/__init__.py
|
coveooss/coveo-python-oss
|
08f5048e12449392f3acaeff85e075711543daab
|
[
"Apache-2.0"
] | 7
|
2021-03-02T19:30:30.000Z
|
2022-03-08T12:24:50.000Z
|
coveo-functools/coveo_functools/flex/__init__.py
|
coveooss/coveo-python-oss
|
08f5048e12449392f3acaeff85e075711543daab
|
[
"Apache-2.0"
] | 29
|
2021-01-21T16:45:33.000Z
|
2021-12-10T12:09:26.000Z
|
coveo-functools/coveo_functools/flex/__init__.py
|
coveooss/coveo-python-oss
|
08f5048e12449392f3acaeff85e075711543daab
|
[
"Apache-2.0"
] | null | null | null |
# backward compatibility stuff and import shortcuts
from coveo_functools.flex.decorator import flex, RAW_KEY # noqa: F401
from coveo_functools.flex.deserializer import deserialize # noqa: F401
from coveo_functools.flex.types import * # noqa: F401,F403
| 43.5
| 72
| 0.793103
| 35
| 261
| 5.8
| 0.542857
| 0.133005
| 0.26601
| 0.325123
| 0.295567
| 0.295567
| 0
| 0
| 0
| 0
| 0
| 0.053812
| 0.145594
| 261
| 5
| 73
| 52.2
| 0.856502
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4a907bcfa0dc0ff154f451b28b77c0b96f47c1a2
| 2,768
|
py
|
Python
|
ivy_tests/test_ivy/test_functional/test_core/test_logic.py
|
MudasserAfzal/ivy
|
d43b5da54651ebf1183913acee1279b881e84245
|
[
"Apache-2.0"
] | 1
|
2022-02-28T03:20:54.000Z
|
2022-02-28T03:20:54.000Z
|
ivy_tests/test_ivy/test_functional/test_core/test_logic.py
|
MudasserAfzal/ivy
|
d43b5da54651ebf1183913acee1279b881e84245
|
[
"Apache-2.0"
] | null | null | null |
ivy_tests/test_ivy/test_functional/test_core/test_logic.py
|
MudasserAfzal/ivy
|
d43b5da54651ebf1183913acee1279b881e84245
|
[
"Apache-2.0"
] | null | null | null |
"""
Collection of tests for unified logic functions
"""
# global
import pytest
import numpy as np
# local
import ivy
import ivy.functional.backends.numpy
import ivy_tests.test_ivy.helpers as helpers
# logical_and
@pytest.mark.parametrize(
"x1_n_x2", [([True, True], [False, True]), ([[0.]], [[1.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_logical_and(x1_n_x2, dtype, tensor_fn, dev, call):
# smoke test
x1, x2 = x1_n_x2
x1 = tensor_fn(x1, dtype, dev)
x2 = tensor_fn(x2, dtype, dev)
ret = ivy.logical_and(x1, x2)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x1.shape
# value test
assert np.allclose(call(ivy.logical_and, x1, x2), ivy.functional.backends.numpy.logical_and(ivy.to_numpy(x1), ivy.to_numpy(x2)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support .type() method
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.logical_and)
# logical_or
@pytest.mark.parametrize(
"x1_n_x2", [([True, True], [False, True]), ([[0.]], [[1.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_logical_or(x1_n_x2, dtype, tensor_fn, dev, call):
# smoke test
x1, x2 = x1_n_x2
x1 = tensor_fn(x1, dtype, dev)
x2 = tensor_fn(x2, dtype, dev)
ret = ivy.logical_or(x1, x2)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x1.shape
# value test
assert np.allclose(call(ivy.logical_or, x1, x2), ivy.functional.backends.numpy.logical_or(ivy.to_numpy(x1), ivy.to_numpy(x2)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support .type() method
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.logical_or)
# logical_not
@pytest.mark.parametrize(
"x", [[True, True], [[0.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_logical_not(x, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x, dtype, dev)
ret = ivy.logical_not(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.logical_not, x), ivy.functional.backends.numpy.logical_not(ivy.to_numpy(x)))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support .type() method
return
if not ivy.array_mode():
helpers.assert_compilable(ivy.logical_not)
| 30.086957
| 132
| 0.66474
| 402
| 2,768
| 4.412935
| 0.159204
| 0.049605
| 0.106539
| 0.058625
| 0.846674
| 0.810598
| 0.810598
| 0.752537
| 0.752537
| 0.727734
| 0
| 0.022022
| 0.196171
| 2,768
| 91
| 133
| 30.417582
| 0.775281
| 0.160405
| 0
| 0.649123
| 0
| 0
| 0.034002
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 1
| 0.052632
| false
| 0
| 0.087719
| 0
| 0.192982
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
43cfdf154d0ec1ce3831edaaeb95a6d991105d96
| 3,410
|
py
|
Python
|
tests/datasets/test_multi_datamodule.py
|
facebookresearch/pythia
|
079740bee4b357a7b1b866d35e2f1fad6edba8a4
|
[
"BSD-3-Clause"
] | 3,252
|
2018-07-27T02:32:24.000Z
|
2020-05-07T17:54:46.000Z
|
tests/datasets/test_multi_datamodule.py
|
facebookresearch/pythia
|
079740bee4b357a7b1b866d35e2f1fad6edba8a4
|
[
"BSD-3-Clause"
] | 209
|
2018-07-30T06:39:59.000Z
|
2020-05-04T22:03:48.000Z
|
tests/datasets/test_multi_datamodule.py
|
facebookresearch/pythia
|
079740bee4b357a7b1b866d35e2f1fad6edba8a4
|
[
"BSD-3-Clause"
] | 431
|
2018-07-27T04:17:37.000Z
|
2020-05-05T13:58:02.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import torch
from mmf.datasets.lightning_multi_datamodule import LightningMultiDataModule
from mmf.datasets.mmf_dataset_builder import MMFDatasetBuilder
from mmf.datasets.multi_datamodule import MultiDataModule
from omegaconf import OmegaConf
from tests.datasets.test_mmf_dataset_builder import SimpleMMFDataset
class MultiDataModuleTestObject(MultiDataModule):
def __init__(self, batch_size):
self.batch_size = batch_size
config = OmegaConf.create(
{
"use_features": True,
"annotations": {
"train": "not_a_real_annotations_dataset",
"val": "not_a_real_annotations_dataset",
},
"features": {
"train": "not_a_real_features_dataset",
"val": "not_a_real_features_dataset",
},
"dataset_config": {"simple": 0},
}
)
self.config = config
self.dataset_list = []
dataset_builder = MMFDatasetBuilder(
"simple", functools.partial(SimpleMMFDataset, num_examples=100)
)
dataset_builder.train_dataloader = self._get_dataloader
dataset_builder.val_dataloader = self._get_dataloader
dataset_builder.test_dataloader = self._get_dataloader
self.datamodules = {"simple": dataset_builder}
def _get_dataloader(self):
dataset = SimpleMMFDataset(
num_examples=100,
dataset_name="simple",
dataset_type="val",
config=self.config,
)
dataloader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=1,
drop_last=False,
)
return dataloader
class LightningDataModuleTestObject(LightningMultiDataModule):
def __init__(self, batch_size):
self.batch_size = batch_size
config = OmegaConf.create(
{
"use_features": True,
"annotations": {
"train": "not_a_real_annotations_dataset",
"val": "not_a_real_annotations_dataset",
},
"features": {
"train": "not_a_real_features_dataset",
"val": "not_a_real_features_dataset",
},
"dataset_config": {"simple": 0},
}
)
self.config = config
self.dataset_list = []
dataset_builder = MMFDatasetBuilder(
"simple", functools.partial(SimpleMMFDataset, num_examples=100)
)
dataset_builder.train_dataloader = self._get_dataloader
dataset_builder.val_dataloader = self._get_dataloader
dataset_builder.test_dataloader = self._get_dataloader
self.datamodules = {"simple": dataset_builder}
def _get_dataloader(self):
dataset = SimpleMMFDataset(
num_examples=100,
dataset_name="simple",
dataset_type="val",
config=self.config,
)
dataloader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=1,
drop_last=False,
)
return dataloader
| 34.795918
| 76
| 0.592082
| 309
| 3,410
| 6.184466
| 0.210356
| 0.087912
| 0.03349
| 0.084772
| 0.77551
| 0.77551
| 0.77551
| 0.77551
| 0.77551
| 0.77551
| 0
| 0.00696
| 0.325806
| 3,410
| 97
| 77
| 35.154639
| 0.824271
| 0.014076
| 0
| 0.719101
| 0
| 0
| 0.120238
| 0.067857
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044944
| false
| 0
| 0.078652
| 0
| 0.168539
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
43d39546ea2d1046b2d090b13c9a78e4f68b1b01
| 246
|
py
|
Python
|
Python/B2-Wuerfel/Wuerfel.py
|
frankyhub/Calliope
|
335f0ef5ca9bcf57e14166319501ec9086bc09bf
|
[
"MIT"
] | null | null | null |
Python/B2-Wuerfel/Wuerfel.py
|
frankyhub/Calliope
|
335f0ef5ca9bcf57e14166319501ec9086bc09bf
|
[
"MIT"
] | null | null | null |
Python/B2-Wuerfel/Wuerfel.py
|
frankyhub/Calliope
|
335f0ef5ca9bcf57e14166319501ec9086bc09bf
|
[
"MIT"
] | null | null | null |
def on_button_pressed_a():
basic.show_number(randint(1, 6))
input.on_button_pressed(Button.A, on_button_pressed_a)
def on_button_pressed_b():
basic.show_number(randint(1, 4))
input.on_button_pressed(Button.B, on_button_pressed_b)
| 30.75
| 55
| 0.772358
| 42
| 246
| 4.095238
| 0.333333
| 0.27907
| 0.523256
| 0.209302
| 0.569767
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018349
| 0.113821
| 246
| 7
| 56
| 35.142857
| 0.770642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
78f38e5a158b625177f169e9748e7a88cbc9ecd2
| 4,594
|
py
|
Python
|
graphql_compiler/tests/test_graphql_pretty_print.py
|
manesioz/graphql-compiler
|
b23be9c4a8e26f8c82e741625e04f7c9ac2e623b
|
[
"Apache-2.0"
] | 521
|
2017-07-18T23:56:25.000Z
|
2022-03-25T16:39:06.000Z
|
graphql_compiler/tests/test_graphql_pretty_print.py
|
manesioz/graphql-compiler
|
b23be9c4a8e26f8c82e741625e04f7c9ac2e623b
|
[
"Apache-2.0"
] | 740
|
2017-07-19T01:52:42.000Z
|
2021-09-30T11:15:00.000Z
|
graphql_compiler/tests/test_graphql_pretty_print.py
|
manesioz/graphql-compiler
|
b23be9c4a8e26f8c82e741625e04f7c9ac2e623b
|
[
"Apache-2.0"
] | 56
|
2017-07-18T23:56:14.000Z
|
2021-10-30T08:08:56.000Z
|
# Copyright 2017-present Kensho Technologies, LLC.
from textwrap import dedent
import unittest
from ..query_formatting.graphql_formatting import pretty_print_graphql
class GraphQLPrettyPrintTests(unittest.TestCase):
def test_graphql_pretty_print_indentation(self) -> None:
bad_query = """{
Animal {
name @output(out_name: "name")
}
}"""
four_space_output = dedent(
"""\
{
Animal {
name @output(out_name: "name")
}
}
"""
)
two_space_output = dedent(
"""\
{
Animal {
name @output(out_name: "name")
}
}
"""
)
self.assertEqual(four_space_output, pretty_print_graphql(bad_query))
self.assertEqual(two_space_output, pretty_print_graphql(bad_query, use_four_spaces=False))
def test_filter_directive_order(self) -> None:
bad_query = """{
Animal @filter(value: ["$name"], op_name: "name_or_alias") {
uuid @filter(value: ["$max_uuid"], op_name: "<=")
out_Entity_Related {
...on Species{
name @output(out_name: "related_species")
}
}
}
}"""
expected_output = dedent(
"""\
{
Animal @filter(op_name: "name_or_alias", value: ["$name"]) {
uuid @filter(op_name: "<=", value: ["$max_uuid"])
out_Entity_Related {
... on Species {
name @output(out_name: "related_species")
}
}
}
}
"""
)
self.assertEqual(expected_output, pretty_print_graphql(bad_query))
def test_args_not_in_schema(self) -> None:
bad_query = """{
Animal @filter(value: ["$name"], unknown_arg: "value", op_name: "name_or_alias") {
uuid @filter(value: ["$max_uuid"], op_name: "<=")
out_Entity_Related {
...on Species{
name @output(out_name: "related_species")
}
}
}
}"""
expected_output = dedent(
"""\
{
Animal @filter(op_name: "name_or_alias", value: ["$name"], unknown_arg: "value") {
uuid @filter(op_name: "<=", value: ["$max_uuid"])
out_Entity_Related {
... on Species {
name @output(out_name: "related_species")
}
}
}
}
"""
)
self.assertEqual(expected_output, pretty_print_graphql(bad_query))
def test_missing_args(self) -> None:
bad_query = """{
Animal @filter(value: ["$name"]) {
uuid @filter(value: ["$max_uuid"], op_name: "<=")
out_Entity_Related {
...on Species{
name @output(out_name: "related_species")
}
}
}
}"""
expected_output = dedent(
"""\
{
Animal @filter(value: ["$name"]) {
uuid @filter(op_name: "<=", value: ["$max_uuid"])
out_Entity_Related {
... on Species {
name @output(out_name: "related_species")
}
}
}
}
"""
)
self.assertEqual(expected_output, pretty_print_graphql(bad_query))
def test_other_directive(self) -> None:
bad_query = """{
Animal @filter(value: ["$name"]) {
uuid @filter(value: ["$max_uuid"], op_name: "<=")
out_Entity_Related @other(arg1: "val1", arg2: "val2") {
...on Species{
name @output(out_name: "related_species")
}
}
}
}"""
expected_output = dedent(
"""\
{
Animal @filter(value: ["$name"]) {
uuid @filter(op_name: "<=", value: ["$max_uuid"])
out_Entity_Related @other(arg1: "val1", arg2: "val2") {
... on Species {
name @output(out_name: "related_species")
}
}
}
}
"""
)
self.assertEqual(expected_output, pretty_print_graphql(bad_query))
| 29.63871
| 98
| 0.44471
| 380
| 4,594
| 5.047368
| 0.163158
| 0.037539
| 0.074557
| 0.097497
| 0.818561
| 0.794578
| 0.780501
| 0.741919
| 0.703337
| 0.657456
| 0
| 0.004591
| 0.430997
| 4,594
| 154
| 99
| 29.831169
| 0.729151
| 0.010448
| 0
| 0.486111
| 0
| 0
| 0.512169
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0.069444
| false
| 0
| 0.041667
| 0
| 0.125
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
78ff0ff94b158b8a0b4a0bdd82c57578f958bc59
| 121
|
py
|
Python
|
hydra/plugins/plugin.py
|
andrewjong/hydra
|
c2faea0f137164721e73d4d0143f9e03554daae4
|
[
"MIT"
] | 2
|
2021-02-06T00:23:56.000Z
|
2021-03-08T17:31:49.000Z
|
hydra/plugins/plugin.py
|
andrewjong/hydra
|
c2faea0f137164721e73d4d0143f9e03554daae4
|
[
"MIT"
] | 4
|
2021-10-06T22:51:46.000Z
|
2022-02-27T12:53:27.000Z
|
hydra/plugins/plugin.py
|
andrewjong/hydra
|
c2faea0f137164721e73d4d0143f9e03554daae4
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from abc import ABC
class Plugin(ABC):
pass
| 17.285714
| 70
| 0.727273
| 18
| 121
| 4.888889
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.198347
| 121
| 6
| 71
| 20.166667
| 0.907216
| 0.561983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
78ffa1be2666509203b8ad6638c6f9b74cdd04a0
| 45
|
py
|
Python
|
RobotMovingPanel/features/__init__.py
|
Vlad12344/Pulseapi_Integration
|
2acf93a17dd2911328141886b8724134fff84f00
|
[
"MIT"
] | null | null | null |
RobotMovingPanel/features/__init__.py
|
Vlad12344/Pulseapi_Integration
|
2acf93a17dd2911328141886b8724134fff84f00
|
[
"MIT"
] | null | null | null |
RobotMovingPanel/features/__init__.py
|
Vlad12344/Pulseapi_Integration
|
2acf93a17dd2911328141886b8724134fff84f00
|
[
"MIT"
] | null | null | null |
from RobotMovingPanel.features.TCP import tcp
| 45
| 45
| 0.888889
| 6
| 45
| 6.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 45
| 1
| 45
| 45
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
602893babf2340c835d0745a7c7a285b961f55ee
| 21,295
|
py
|
Python
|
demo_scripts/variable_type_config.py
|
groverpr/aws-fraud-detector-samples
|
a1f178ee4389416b93750abb1db622f74a6b3cb4
|
[
"MIT-0"
] | null | null | null |
demo_scripts/variable_type_config.py
|
groverpr/aws-fraud-detector-samples
|
a1f178ee4389416b93750abb1db622f74a6b3cb4
|
[
"MIT-0"
] | null | null | null |
demo_scripts/variable_type_config.py
|
groverpr/aws-fraud-detector-samples
|
a1f178ee4389416b93750abb1db622f74a6b3cb4
|
[
"MIT-0"
] | 1
|
2022-01-25T20:48:22.000Z
|
2022-01-25T20:48:22.000Z
|
RECIPE = \
{
"Registration_FakeAccountCreationByBots":
{
"data_path": "data/Registration_FakeAccountCreationByBots_100k.csv",
"variable_mappings": [
{
"variable_name": "first_name",
"variable_type": "SHIPPING_NAME",
"data_type": "STRING"
},
{
"variable_name": "last_name",
"variable_type": "BILLING_NAME",
"data_type": "STRING"
},
{
"variable_name": "ip_address",
"variable_type": "IP_ADDRESS",
"data_type": "STRING"
},
{
"variable_name": "honeypot_hits_with_given_user_agent_last_hour",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "email_address",
"variable_type": "EMAIL_ADDRESS",
"data_type": "STRING"
},
{
"variable_name": "user_agent",
"variable_type": "USERAGENT",
"data_type": "STRING"
},
{
"variable_name": "honeypot_hits_with_given_ip_address_last_hour",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
}
],
"label_mappings": {
"FRAUD": ["fraud"],
"LEGIT": ["legit"]
}
},
"Registration_FakeAccountCreationByHumans":
{
"data_path": "data/Registration_FakeAccountCreationByHumans_100k.csv",
"variable_mappings": [
{
"variable_name": "first_name",
"variable_type": "SHIPPING_NAME",
"data_type": "STRING"
},
{
"variable_name": "last_name",
"variable_type": "BILLING_NAME",
"data_type": "STRING"
},
{
"variable_name": "ip_address",
"variable_type": "IP_ADDRESS",
"data_type": "STRING"
},
{
"variable_name": "user_agent",
"variable_type": "USERAGENT",
"data_type": "STRING"
},
{
"variable_name": "email_address",
"variable_type": "EMAIL_ADDRESS",
"data_type": "STRING"
},
{
"variable_name": "date_of_birth",
"variable_type": "FREE_FORM_TEXT",
"data_type": "STRING"
},
{
"variable_name": "email_domain",
"variable_type": "CATEGORICAL",
"data_type": "STRING"
}
],
"label_mappings": {
"FRAUD": ["fraud"],
"LEGIT": ["legit"]
}
},
"Transactions_CardNotPresentOnlineTransactions":
{
"data_path": "data/Transactions_CardNotPresentOnlineTransactions_100k.csv",
"variable_mappings": [
{
"variable_name": "ip_address",
"variable_type": "IP_ADDRESS",
"data_type": "STRING"
},
{
"variable_name": "user_agent",
"variable_type": "USERAGENT",
"data_type": "STRING"
},
{
"variable_name": "email_address",
"variable_type": "EMAIL_ADDRESS",
"data_type": "STRING"
},
{
"variable_name": "fingerprint",
"variable_type": "FINGERPRINT",
"data_type": "STRING"
},
{
"variable_name": "phone_number",
"variable_type": "PHONE_NUMBER",
"data_type": "STRING"
},
{
"variable_name": "billing_address",
"variable_type": "BILLING_ADDRESS_L1",
"data_type": "STRING"
},
{
"variable_name": "billing_city",
"variable_type": "BILLING_CITY",
"data_type": "STRING"
},
{
"variable_name": "billing_postal",
"variable_type": "BILLING_ZIP",
"data_type": "STRING"
},
{
"variable_name": "billing_state",
"variable_type": "BILLING_STATE",
"data_type": "STRING"
},
{
"variable_name": "billing_country",
"variable_type": "BILLING_COUNTRY",
"data_type": "STRING"
},
{
"variable_name": "merchant_id",
"variable_type": "CATEGORICAL",
"data_type": "STRING"
},
{
"variable_name": "card_bin",
"variable_type": "CARD_BIN",
"data_type": "INTEGER"
},
{
"variable_name": "product_id",
"variable_type": "CATEGORICAL",
"data_type": "STRING"
},
{
"variable_name": "product_category",
"variable_type": "CATEGORICAL",
"data_type": "STRING"
},
{
"variable_name": "transaction_amount",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "shipping_address",
"variable_type": "SHIPPING_ADDRESS_L1",
"data_type": "STRING"
},
{
"variable_name": "shipping_city",
"variable_type": "SHIPPING_CITY",
"data_type": "STRING"
},
{
"variable_name": "shipping_postal",
"variable_type": "SHIPPING_ZIP",
"data_type": "STRING"
},
{
"variable_name": "shipping_state",
"variable_type": "SHIPPING_STATE",
"data_type": "STRING"
},
{
"variable_name": "shipping_country",
"variable_type": "SHIPPING_COUNTRY",
"data_type": "STRING"
}
],
"label_mappings": {
"FRAUD": ["fraud"],
"LEGIT": ["legit"]
}
},
"Transactions_LoyaltyPayments":
{
"data_path": "data/Transactions_LoyaltyPayments_100k.csv",
"variable_mappings": [
{
"variable_name": "ip_address",
"variable_type": "IP_ADDRESS",
"data_type": "STRING"
},
{
"variable_name": "user_agent",
"variable_type": "USERAGENT",
"data_type": "STRING"
},
{
"variable_name": "email_address",
"variable_type": "EMAIL_ADDRESS",
"data_type": "STRING"
},
{
"variable_name": "is_code_transferred",
"variable_type": "CATEGORICAL",
"data_type": "STRING"
},
{
"variable_name": "is_postal_in_txn_same_as_postal_in_acnt",
"variable_type": "CATEGORICAL",
"data_type": "STRING"
},
{
"variable_name": "shipping_city",
"variable_type": "SHIPPING_CITY",
"data_type": "STRING"
},
{
"variable_name": "shipping_postal",
"variable_type": "SHIPPING_ZIP",
"data_type": "STRING"
},
{
"variable_name": "shipping_state",
"variable_type": "SHIPPING_STATE",
"data_type": "STRING"
},
{
"variable_name": "shipping_country",
"variable_type": "SHIPPING_COUNTRY",
"data_type": "STRING"
},
{
"variable_name": "loyalty_card_type",
"variable_type": "CATEGORICAL",
"data_type": "STRING"
},
{
"variable_name": "transaction_amount",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "count_previous_redemptions_device",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "count_previous_redemptions_ip",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "count_of_txns_loyalty_card_last_day",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "amount_of_txns_loyalty_card_last_day",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "total_reward_points",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "coupon_code",
"variable_type": "CATEGORICAL",
"data_type": "STRING"
},
{
"variable_name": "device_id",
"variable_type": "CATEGORICAL",
"data_type": "STRING"
}
],
"label_mappings": {
"FRAUD": ["fraud"],
"LEGIT": ["legit"]
}
},
"Abuse_FreeTrialReferralAbuse":
{
"data_path": "data/Abuse_FreeTrialReferralAbuse_100k.csv",
"variable_mappings": [
{
"variable_name": "referral_code",
"variable_type": "CATEGORICAL",
"data_type": "STRING"
},
{
"variable_name": "first_name",
"variable_type": "SHIPPING_NAME",
"data_type": "STRING"
},
{
"variable_name": "card_bin",
"variable_type": "CARD_BIN",
"data_type": "INTEGER"
},
{
"variable_name": "last_name",
"variable_type": "BILLING_NAME",
"data_type": "STRING"
},
{
"variable_name": "email_address",
"variable_type": "EMAIL_ADDRESS",
"data_type": "STRING"
},
{
"variable_name": "ip_address",
"variable_type": "IP_ADDRESS",
"data_type": "STRING"
},
{
"variable_name": "phone_number",
"variable_type": "PHONE_NUMBER",
"data_type": "STRING"
},
{
"variable_name": "postal_code",
"variable_type": "BILLING_ZIP",
"data_type": "STRING"
},
{
"variable_name": "referral_medium",
"variable_type": "CATEGORICAL",
"data_type": "STRING"
}
],
"label_mappings": {
"FRAUD": ["fraud"],
"LEGIT": ["legit"]
}
},
"ContentModeration_FakeReviews":
{
"data_path": "data/ContentModeration_FakeReviews_100k.csv",
"variable_mappings": [
{
"variable_name": "hour_of_review",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "asin",
"variable_type": "CATEGORICAL",
"data_type": "STRING"
},
{
"variable_name": "review_text",
"variable_type": "FREE_FORM_TEXT",
"data_type": "STRING"
},
{
"variable_name": "rating",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
}
],
"label_mappings": {
"FRAUD": ["fraud"],
"LEGIT": ["legit"]
}
},
"Insurance_FraudulentAutoInsuranceClaims":
{
"data_path": "data/Insurance_FraudulentAutoInsuranceClaims_100k.csv",
"variable_mappings": [
{
"variable_name": "first_name",
"variable_type": "SHIPPING_NAME",
"data_type": "STRING"
},
{
"variable_name": "last_name",
"variable_type": "BILLING_NAME",
"data_type": "STRING"
},
{
"variable_name": "policy_id",
"variable_type": "ORDER_ID",
"data_type": "STRING"
},
{
"variable_name": "policy_deductable",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "customer_age",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "policy_annual_premium",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "incident_severity",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "vehicle_claim",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "incident_hour",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "num_injuries",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "num_claims_past_year",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "injury_claim",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "num_vehicles_involved",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "num_witnesses",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "incident_type",
"variable_type": "CATEGORICAL",
"data_type": "STRING"
},
{
"variable_name": "police_report_available",
"variable_type": "CATEGORICAL",
"data_type": "STRING"
}
],
"label_mappings": {
"FRAUD": ["fraud"],
"LEGIT": ["legit"]
}
},
"Advertisement_AdClickFraud":
{
"data_path": "data/Advertisement_AdClickFraud_20k.csv",
"variable_mappings": [
{
"variable_name": "ip_address",
"variable_type": "IP_ADDRESS",
"data_type": "STRING"
},
{
"variable_name": "user_agent",
"variable_type": "USERAGENT",
"data_type": "STRING"
},
{
"variable_name": "campaign_id",
"variable_type": "CATEGORICAL",
"data_type": "STRING"
},
{
"variable_name": "publisher_id",
"variable_type": "CATEGORICAL",
"data_type": "STRING"
},
{
"variable_name": "time_between_clicks_minutes",
"variable_type": "NUMERIC",
"data_type": "FLOAT"
},
{
"variable_name": "click_id",
"variable_type": "CATEGORICAL",
"data_type": "STRING"
},
{
"variable_name": "app_category_id",
"variable_type": "CATEGORICAL",
"data_type": "STRING"
}
],
"label_mappings": {
"FRAUD": ["fraud"],
"LEGIT": ["legit"]
}
}
}
| 39.508349
| 91
| 0.325898
| 1,105
| 21,295
| 5.838914
| 0.114027
| 0.16367
| 0.136702
| 0.194358
| 0.811066
| 0.811066
| 0.771079
| 0.764259
| 0.695908
| 0.546962
| 0
| 0.002773
| 0.576661
| 21,295
| 539
| 92
| 39.508349
| 0.712923
| 0
| 0
| 0.472275
| 0
| 0
| 0.326352
| 0.047474
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.001912
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6065cdf1306b7e7a7bcc249359b334450efed9ac
| 55
|
py
|
Python
|
net_ner/task/__init__.py
|
renjunxiang/ccks2019_el
|
67b7b35312c06248ea1deccbfb37cf5d8e5c6376
|
[
"MIT"
] | 99
|
2019-08-01T01:04:54.000Z
|
2022-03-17T09:00:14.000Z
|
net_ner/task/__init__.py
|
ZhouXiaoLeilei/ccks2019_el-1
|
67b7b35312c06248ea1deccbfb37cf5d8e5c6376
|
[
"MIT"
] | 5
|
2019-08-06T02:16:20.000Z
|
2021-12-12T15:37:27.000Z
|
net_ner/task/__init__.py
|
ZhouXiaoLeilei/ccks2019_el-1
|
67b7b35312c06248ea1deccbfb37cf5d8e5c6376
|
[
"MIT"
] | 18
|
2019-08-10T11:18:29.000Z
|
2022-03-15T04:44:52.000Z
|
from .Locate_Entity import Locate_Entity, slice_entity
| 27.5
| 54
| 0.872727
| 8
| 55
| 5.625
| 0.625
| 0.533333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 55
| 1
| 55
| 55
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
60ed1a174d34aacfa7b01dbf21c63f1db5b18160
| 6,838
|
py
|
Python
|
scripts/networkDesign.py
|
johodges/datadriven-wildfire-spread
|
f391c6322b70c28c0016eecc3d324c6078b03dc9
|
[
"Apache-2.0"
] | null | null | null |
scripts/networkDesign.py
|
johodges/datadriven-wildfire-spread
|
f391c6322b70c28c0016eecc3d324c6078b03dc9
|
[
"Apache-2.0"
] | null | null | null |
scripts/networkDesign.py
|
johodges/datadriven-wildfire-spread
|
f391c6322b70c28c0016eecc3d324c6078b03dc9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 20 14:05:20 2018
@author: JHodges
"""
import tensorflow as tf
def cnnModel3(features, labels, mode):
"""Model function for CNN."""
dconv = True
sz = 50
n_dimensions = 13
#n_dimensions = int(features["x"].get_shape().as_list()[1]/(sz**2))
print("MODE=%s\nInput Dimensions=%s"%(mode,n_dimensions))
ks1 = [10,10]
ks2 = [10,10]
ks3 = [10,10]
fs1 = 32
fs2 = 64
fs3 = 2
# Input Layer
input_layer = tf.reshape(features["x"], [-1, sz, sz, n_dimensions])
dropOut_layer = tf.layers.dropout(input_layer,rate=0.5)
#print(input_layer.shape)
# Convolutional Layer #1
conv1 = tf.layers.conv2d(
inputs=dropOut_layer,
filters=fs1,
kernel_size=ks1,
padding="same",
activation=tf.nn.leaky_relu,
name="conv1")
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
# Convolutional Layer #2 and Pooling Layer #2
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=fs2,
kernel_size=ks2,
padding="same",
activation=tf.nn.leaky_relu,
name="conv2")
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
pool2flat = tf.reshape(pool2,[-1,pool2.shape[1]*pool2.shape[2]*pool2.shape[3]])
if dconv:
dense1 = tf.layers.dense(inputs=pool2flat, units=int(sz*sz*2), activation=tf.nn.leaky_relu)
dense1_rs = tf.reshape(dense1,[-1,sz,sz,2])
dconv1 = tf.layers.conv2d_transpose(
inputs=dense1_rs,filters=fs3,
kernel_size=ks3,
padding="same",
activation=tf.nn.leaky_relu,
name="dconv1")
dconv1flat = tf.reshape(dconv1,[-1,dconv1.shape[1]*dconv1.shape[2]*dconv1.shape[3]])
denseOut = tf.layers.dense(inputs=dconv1flat, units=int(sz*sz*2), activation=tf.nn.tanh)
print("Input Layer Dimensions:\t",input_layer.shape)
print("Dropout Layer Dimensions:\t",dropOut_layer.shape)
print("First Conv Layer Dim:\t",conv1.shape)
print("First Pool Layer Dim:\t",pool1.shape)
print("Second Conv Layer Dim:\t", conv2.shape)
print("Second Pool Layer Dim:\t", pool2.shape)
print("Classify Layer Dim:\t", dense1.shape)
print("Deconv Layer Dim:\t", dconv1.shape)
print("Output Layer Dim:\t",denseOut.shape)
else:
denseOut = tf.layers.dense(inputs=pool2flat, units=int(sz*sz*2), activation=tf.nn.tanh)
logits = tf.reshape(denseOut,[-1,int(sz*sz*2)])
predicted_classes = tf.argmax(input=tf.reshape(dense1,[-1,int(sz*sz),2]), axis=2)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class_ids': predicted_classes,#[:, tf.newaxis],
'probabilities': tf.nn.softmax(logits),
'logits': logits,
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
loss = tf.reduce_sum(abs(tf.cast(labels,tf.float32)-tf.cast(logits,tf.float32))**2)**0.5
label_rs = tf.reshape(labels,[-1,int(sz*sz),2])
label_classes = tf.argmax(input=label_rs,axis=2)
accuracy = tf.metrics.accuracy(labels=label_classes,predictions=predicted_classes,name='acc_op')
metrics = {'accuracy': accuracy}
tf.summary.scalar('accuracy', accuracy[1])
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode,loss=loss,eval_metric_ops=metrics)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=10**-4)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
def cnnModel4(features, labels, mode):
"""Model function for CNN."""
dropoutRate = 0.25 if mode == tf.estimator.ModeKeys.TRAIN else 0.0
(sz,n_dimensions) = (50,13)
(ks1,fs1,ks2,fs2,ks3,fs3) = ([10,10],32,[10,10],64,[10,10],2)
lrelu = tf.nn.leaky_relu
#n_dimensions = int(features["x"].get_shape().as_list()[1]/(sz**2))
print("MODE=%s\nInput Dimensions=%s"%(mode,n_dimensions))
# Input Layer
input_layer = tf.reshape(features["x"], [-1, sz, sz, n_dimensions])
# Convolutional Layer #1
conv1 = tf.layers.conv2d(inputs=input_layer,filters=fs1,kernel_size=ks1,padding="same",activation=lrelu,name="conv1")
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
# Convolutional Layer #2 and Pooling Layer #2
conv2 = tf.layers.conv2d(inputs=pool1,filters=fs2,kernel_size=ks2,padding="same",activation=lrelu,name="conv2")
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
pool2flat = tf.reshape(pool2,[-1,pool2.shape[1]*pool2.shape[2]*pool2.shape[3]])
# Dense classification layer
dense1 = tf.layers.dense(inputs=pool2flat, units=int(sz*sz*2), activation=lrelu)
# Dropout layer
dropOut_layer = tf.layers.dropout(dense1,rate=dropoutRate)
dense1_rs = tf.reshape(dropOut_layer,[-1,sz,sz,2])
dconv1 = tf.layers.conv2d_transpose(inputs=dense1_rs,filters=fs3,kernel_size=ks3,padding="same",activation=lrelu,name="dconv1")
dconv1flat = tf.reshape(dconv1,[-1,dconv1.shape[1]*dconv1.shape[2]*dconv1.shape[3]])
# Output layer
denseOut = tf.layers.dense(inputs=dconv1flat, units=int(sz*sz*2), activation=tf.nn.tanh)
logits = tf.reshape(denseOut,[-1,int(sz*sz*2)])
predicted_classes = tf.argmax(input=tf.reshape(dense1,[-1,int(sz*sz),2]), axis=2)
# Print sizes for debugging
print("Input Layer Dimensions:\t",input_layer.shape)
print("First Conv Layer Dim:\t",conv1.shape)
print("First Pool Layer Dim:\t",pool1.shape)
print("Second Conv Layer Dim:\t", conv2.shape)
print("Second Pool Layer Dim:\t", pool2.shape)
print("Classify Layer Dim:\t", dense1.shape)
print("Dropout Layer Dimensions:\t",dropOut_layer.shape)
print("Deconv Layer Dim:\t", dconv1.shape)
print("Output Layer Dim:\t",denseOut.shape)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {'class_ids': predicted_classes,'probabilities': tf.nn.softmax(logits),'logits': logits}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
#loss = tf.reduce_sum(abs(tf.cast(labels,tf.float32)-tf.cast(logits,tf.float32))**2)**0.5
loss = tf.losses.sigmoid_cross_entropy(labels,logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=10**-4)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
| 43.278481
| 131
| 0.65253
| 951
| 6,838
| 4.605678
| 0.162986
| 0.03105
| 0.028767
| 0.018265
| 0.835616
| 0.808219
| 0.80137
| 0.774886
| 0.74589
| 0.708219
| 0
| 0.045356
| 0.193916
| 6,838
| 157
| 132
| 43.55414
| 0.749274
| 0.09067
| 0
| 0.486486
| 0
| 0
| 0.097427
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018018
| false
| 0
| 0.009009
| 0
| 0.072072
| 0.18018
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
71c62ab4879ea1f548ed7cf43cbd971c494b3dbd
| 25
|
py
|
Python
|
labtools/__main__.py
|
Jejulia/labtools
|
fbd5e6d9857e2403feda47fd683e7a2d0532b600
|
[
"MIT"
] | null | null | null |
labtools/__main__.py
|
Jejulia/labtools
|
fbd5e6d9857e2403feda47fd683e7a2d0532b600
|
[
"MIT"
] | null | null | null |
labtools/__main__.py
|
Jejulia/labtools
|
fbd5e6d9857e2403feda47fd683e7a2d0532b600
|
[
"MIT"
] | null | null | null |
import labtools as lb
| 5
| 21
| 0.72
| 4
| 25
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.28
| 25
| 4
| 22
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
71cb60f27edd018e3882af6dccfb9181ed5d4dcb
| 44
|
py
|
Python
|
gym_fabrikatioRL/__init__.py
|
malerinc/fabricatio-rl
|
414ec7cacd0e4316882bb93109930ad8c257cf7f
|
[
"MIT"
] | 3
|
2021-08-09T15:40:36.000Z
|
2022-03-18T07:31:16.000Z
|
gym_fabrikatioRL/__init__.py
|
malerinc/fabricatio-rl
|
414ec7cacd0e4316882bb93109930ad8c257cf7f
|
[
"MIT"
] | null | null | null |
gym_fabrikatioRL/__init__.py
|
malerinc/fabricatio-rl
|
414ec7cacd0e4316882bb93109930ad8c257cf7f
|
[
"MIT"
] | null | null | null |
from gym.envs.registration import register
| 14.666667
| 42
| 0.840909
| 6
| 44
| 6.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 44
| 2
| 43
| 22
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e0b3b5d46f632529071d69810a97f8aa2d59e9dd
| 21
|
py
|
Python
|
invoke_tools/cloud/__init__.py
|
VJftw/invoke-tools
|
9584a1f8a402118310b6f2a495062f388fc8dc3a
|
[
"MIT"
] | 2
|
2017-07-02T02:46:58.000Z
|
2018-07-24T03:36:30.000Z
|
invoke_tools/cloud/__init__.py
|
VJftw/invoke-tools
|
9584a1f8a402118310b6f2a495062f388fc8dc3a
|
[
"MIT"
] | null | null | null |
invoke_tools/cloud/__init__.py
|
VJftw/invoke-tools
|
9584a1f8a402118310b6f2a495062f388fc8dc3a
|
[
"MIT"
] | 1
|
2019-11-27T14:43:03.000Z
|
2019-11-27T14:43:03.000Z
|
from .aws import AWS
| 10.5
| 20
| 0.761905
| 4
| 21
| 4
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e0be315d5602fa72ecd0629df16722e7a4480341
| 134
|
py
|
Python
|
__init__.py
|
fuzzyVineStone/PygamePlus
|
e17e536c82aec5275d21261b233f5b1dd2083747
|
[
"MIT"
] | null | null | null |
__init__.py
|
fuzzyVineStone/PygamePlus
|
e17e536c82aec5275d21261b233f5b1dd2083747
|
[
"MIT"
] | null | null | null |
__init__.py
|
fuzzyVineStone/PygamePlus
|
e17e536c82aec5275d21261b233f5b1dd2083747
|
[
"MIT"
] | null | null | null |
from .audio import *
from .event import *
from .graphics import *
from .keyboard import *
from .physics import *
from .time import *
| 16.75
| 23
| 0.723881
| 18
| 134
| 5.388889
| 0.444444
| 0.515464
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186567
| 134
| 7
| 24
| 19.142857
| 0.889908
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e0e160fd2d76274dc3ea7103fefc3d37e92db99b
| 4,513
|
py
|
Python
|
data/level/level10216.py
|
levelupai/match3-level-similarity
|
cc9b28b8741b41bea1273c8bc9b4d265d79a1dca
|
[
"Apache-2.0"
] | null | null | null |
data/level/level10216.py
|
levelupai/match3-level-similarity
|
cc9b28b8741b41bea1273c8bc9b4d265d79a1dca
|
[
"Apache-2.0"
] | 6
|
2020-07-04T02:53:08.000Z
|
2022-03-11T23:53:14.000Z
|
data/level/level10216.py
|
levelupai/match3-level-similarity
|
cc9b28b8741b41bea1273c8bc9b4d265d79a1dca
|
[
"Apache-2.0"
] | 3
|
2019-12-31T11:42:59.000Z
|
2021-03-28T20:06:13.000Z
|
data = {'level_index': 10216, 'move_count': '24',
'board_info': {(-7, 14): {}, (-7, 13): {}, (-4, 13): {'next': (0, -1), 'prev': (1, 0)},
(-4, 12): {'next': (0, -1), 'prev': (0, 1)}, (-4, 11): {'next': (0, -1), 'prev': (0, 1)},
(-4, 10): {'next': (0, -1), 'prev': (0, 1)}, (-4, 9): {'next': (0, -1), 'prev': (0, 1)},
(-4, 8): {'next': (0, -1), 'prev': (0, 1)}, (-4, 7): {'next': (0, -1), 'prev': (0, 1)},
(-4, 6): {'next': (0, -1), 'prev': (0, 1)}, (-4, 5): {'next': (1, 0), 'prev': (0, 1)},
(-3, 13): {'next': (-1, 0), 'prev': (1, 0)}, (-3, 12): {'next': (0, -1), 'prev': (1, 0)},
(-3, 11): {'next': (0, -1), 'prev': (0, 1)}, (-3, 10): {'next': (0, -1), 'prev': (0, 1)},
(-3, 9): {'next': (0, -1), 'prev': (0, 1)}, (-3, 8): {'next': (0, -1), 'prev': (0, 1)},
(-3, 7): {'next': (0, -1), 'prev': (0, 1)}, (-3, 6): {'next': (1, 0), 'prev': (0, 1)},
(-3, 5): {'next': (1, 0), 'prev': (-1, 0)}, (-2, 13): {'next': (-1, 0), 'prev': (1, 0)},
(-2, 12): {'next': (-1, 0), 'prev': (1, 0)}, (-2, 11): {'next': (0, -1), 'prev': (1, 0)},
(-2, 10): {'next': (0, -1), 'prev': (0, 1)}, (-2, 9): {'next': (0, -1), 'prev': (0, 1)},
(-2, 8): {'next': (0, -1), 'prev': (0, 1)}, (-2, 7): {'next': (1, 0), 'prev': (0, 1)},
(-2, 6): {'next': (1, 0), 'prev': (-1, 0)}, (-2, 5): {'next': (1, 0), 'prev': (-1, 0)},
(-1, 13): {'next': (-1, 0), 'fall_point': (1, 0)},
(-1, 12): {'next': (-1, 0), 'fall_point': (1, 0)},
(-1, 11): {'next': (-1, 0), 'fall_point': (1, 0)}, (-1, 9): {'next': (1, 0)},
(-1, 8): {'next': (1, 0)}, (-1, 7): {'next': (1, 0), 'prev': (-1, 0)},
(-1, 6): {'next': (1, 0), 'prev': (-1, 0)}, (-1, 5): {'next': (1, 0), 'prev': (-1, 0)},
(0, 9): {'next': (1, 0), 'prev': (-1, 0)}, (0, 8): {'next': (1, 0), 'prev': (-1, 0)},
(0, 7): {'next': (1, 0), 'prev': (-1, 0)}, (0, 6): {'next': (1, 0), 'prev': (-1, 0)},
(0, 5): {'next': (1, 0), 'prev': (-1, 0)}, (1, 7): {'next': (1, 0), 'prev': (-1, 0)},
(1, 6): {'next': (1, 0), 'prev': (-1, 0)}, (1, 5): {'next': (1, 0), 'prev': (-1, 0)}, (2, 9): {},
(2, 8): {}, (2, 7): {'next': (1, 0), 'prev': (-1, 0)}, (2, 6): {'next': (1, 0), 'prev': (-1, 0)},
(2, 5): {'next': (1, 0), 'prev': (-1, 0)}, (3, 13): {'next': (-1, 0), 'prev': (1, 0)},
(3, 12): {'next': (-1, 0), 'prev': (1, 0)}, (3, 11): {'next': (-1, 0), 'prev': (1, 0)},
(3, 9): {}, (3, 8): {}, (3, 7): {'next': (1, 0), 'prev': (-1, 0)},
(3, 6): {'next': (1, 0), 'prev': (-1, 0)}, (3, 5): {'next': (1, 0), 'prev': (-1, 0)},
(4, 13): {'next': (-1, 0), 'prev': (1, 0)}, (4, 12): {'next': (-1, 0), 'prev': (1, 0)},
(4, 11): {'next': (-1, 0)}, (4, 10): {}, (4, 9): {}, (4, 8): {}, (4, 7): {'prev': (-1, 0)},
(4, 6): {'next': (1, 0), 'prev': (-1, 0)}, (4, 5): {'next': (1, 0), 'prev': (-1, 0)},
(5, 13): {'next': (-1, 0), 'prev': (1, 0)}, (5, 12): {'next': (-1, 0)}, (5, 11): {}, (5, 10): {},
(5, 9): {}, (5, 8): {}, (5, 7): {}, (5, 6): {'prev': (-1, 0)},
(5, 5): {'next': (1, 0), 'prev': (-1, 0)}, (6, 13): {'next': (-1, 0)}, (6, 12): {}, (6, 11): {},
(6, 10): {}, (6, 9): {}, (6, 8): {}, (6, 7): {}, (6, 6): {}, (6, 5): {'prev': (-1, 0)}},
'trans_info': {(0, 0): {}},
'wall_info': [[(-3, 12), (-3, 11)], [(-3, 12), (-2, 12)], [(-3, 11), (-2, 11)], [(-3, 10), (-2, 10)],
[(-3, 9), (-2, 9)], [(-3, 8), (-2, 8)], [(-3, 7), (-3, 6)], [(-3, 7), (-2, 7)],
[(-3, 6), (-2, 6)], [(-2, 12), (-2, 11)], [(-2, 7), (-2, 6)], [(-1, 7), (-1, 6)],
[(0, 7), (0, 6)], [(1, 7), (1, 6)], [(2, 7), (2, 6)], [(3, 7), (3, 6)], [(4, 12), (4, 11)],
[(4, 12), (5, 12)], [(4, 11), (5, 11)], [(4, 10), (5, 10)], [(4, 9), (5, 9)], [(4, 8), (5, 8)],
[(4, 7), (4, 6)], [(4, 7), (5, 7)], [(4, 6), (5, 6)], [(5, 12), (5, 11)], [(5, 7), (5, 6)]]}
| 102.568182
| 120
| 0.250609
| 656
| 4,513
| 1.71189
| 0.045732
| 0.149599
| 0.229742
| 0.311665
| 0.731077
| 0.720392
| 0.672306
| 0.252894
| 0.167409
| 0.167409
| 0
| 0.197881
| 0.351651
| 4,513
| 43
| 121
| 104.953488
| 0.185919
| 0
| 0
| 0
| 0
| 0
| 0.12187
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e0fa55e4200d0e84f069807b27139c601e515801
| 6,505
|
py
|
Python
|
tests/nlu_core_tests/training_tests/classifiers/multi_classifier_dl_tests.py
|
askmetoo/nlu
|
e9ce74895ac44f6b61610f0926e4ab08759f04f7
|
[
"Apache-2.0"
] | 1
|
2021-12-26T07:56:38.000Z
|
2021-12-26T07:56:38.000Z
|
tests/nlu_core_tests/training_tests/classifiers/multi_classifier_dl_tests.py
|
avanishiitk/nlu
|
ca244fe422923084196f6f6f4c5c41723e46d537
|
[
"Apache-2.0"
] | null | null | null |
tests/nlu_core_tests/training_tests/classifiers/multi_classifier_dl_tests.py
|
avanishiitk/nlu
|
ca244fe422923084196f6f6f4c5c41723e46d537
|
[
"Apache-2.0"
] | null | null | null |
from sklearn.metrics import classification_report
import unittest
from nlu import *
import tests.test_utils as t
import pandas as pd
class MultiClassifierDlTests(unittest.TestCase):
def test_multi_classifier_dl_training(self):
# The y column must be a string seperated with ```,``` . Custom seperators can be configured by passing
test_df = self.load_multi_classifier_dl_dataset()
# test_df.columns = ['y_str','text']
# test_df['y'] = test_df.y_str.str.split(',')
# test_df.y = test_df.y.astype('stringArray')#pd.arrays.
# test_df.y = test_df.y.astype(list[str])#pd.arrays.
print(test_df.y)
print(test_df)
print(test_df.dtypes)
# test_df.drop('y_str',inplace=True,axis=1)
train_df = test_df
pipe = nlu.load('train.multi_classifier',verbose=True,)
#: java.lang.IllegalArgumentException: requirement failed: The label column MultiClassifierDLApproach_cbfe97978b3c__labelColumn type is StringType and it's not compatible. Compatible types are ArrayType(StringType).
# pipe['multi_classifier_dl'].setMaxEpochs(2)
# pipe.print_info()
pipe = pipe.fit(train_df)
df = pipe.predict(train_df)
print(df.columns)
for c in df.columns : print (df[c])
#
# print(df[['multi_classifier_classes','y']])
# print(df[['multi_classifier_confidences','y']])
df = pipe.predict(test_df)
print(df.columns)
for c in df.columns : print (df[c])
# print(df[['multi_classifier_classes','y']])
# print(df[['multi_classifier_confidence','y']])
df.dropna(inplace=True)
# print (classification_report(df['y'], df['multi_classifier_classes']))
# Too heavy running on github actions
#
# def test_multi_classifier_dl_custom_embeds_doc_level(self):
# test_df = self.load_multi_classifier_dl_dataset()
# # test_df.columns = ['y_str','text']
# test_df.columns = ['y','text']
#
#
#
# print(test_df.y)
# print(test_df)
# print(test_df.dtypes)
#
# # test_df.drop('y_str',inplace=True,axis=1)
# train_df = test_df
#
# pipe = nlu.load('embed_sentence.bert train.multi_classifier',verbose=True,)
# #: java.lang.IllegalArgumentException: requirement failed: The label column MultiClassifierDLApproach_cbfe97978b3c__labelColumn type is StringType and it's not compatible. Compatible types are ArrayType(StringType).
#
# # pipe['multi_classifier_dl'].setMaxEpochs(2)
# pipe.print_info()
# pipe = pipe.fit(train_df)
# df = pipe.predict(train_df)
# print(df.columns)
# print(df[['multi_classifier','y']])
# print(df[['multi_classifier_confidence','y']])
# df = pipe.predict(test_df)
# print(df.columns)
# print(df[['multi_classifier','y']])
# print(df[['multi_classifier_confidence','y']])
# df.dropna(inplace=True)
# print (classification_report(df['y'], df['multi_classifier']))
#
# def test_multi_classifier_dl_custom_embeds_sentence_level(self):
# test_path = self.load_multi_classifier_dl_dataset()
# test_df = pd.read_csv(test_path)
# train_df = test_df
# train_df.columns = ['y','text']
# test_df.columns = ['y','text']
# pipe = nlu.load('embed_sentence.bert train.multi_classifier',verbose=True,)
# pipe['multi_classifier_dl'].setMaxEpochs(2)
# pipe = pipe.fit(train_df)
# df = pipe.predict(train_df, output_level='sentence')
# print(df.columns)
# print(df[['category','y']])
# df = pipe.predict(test_df, output_level='sentence')
# print(df.columns)
# print(df[['category','y']])
# # Eval results
# from sklearn.metrics import classification_report
# print (classification_report(df['y'], df['category']))
#
#
# def test_multi_classifier_dl_custom_embeds_auto_level(self):
# test_path = self.load_multi_classifier_dl_dataset()
# test_df = pd.read_csv(test_path)
# train_df = test_df
# train_df.columns = ['y','text']
# test_df.columns = ['y','text']
# pipe = nlu.load('embed_sentence.bert train.multi_classifier',verbose=True,)
# pipe['multi_classifier_dl'].setMaxEpochs(2)
# pipe = pipe.fit(train_df)
# df = pipe.predict(train_df)
# print(df.columns)
# print(df[['category','y']])
# df = pipe.predict(test_df)
# print(df.columns)
# print(df[['category','y']])
# # Eval results
# from sklearn.metrics import classification_report
# print (classification_report(df['y'], df['category']))
# def load_multi_classifier_dl_dataset(self):
# #relative from tests/nlu_core_tests/training_tests/classifiers
# p = '/home/loan/Documents/freelancework/jsl/nlu/4realnlugit/tests/datasets/multi_classifier_dl/e2e-dataset/testset_w_refs.csv'
# return pd.read_csv(p)
def load_multi_classifier_dl_dataset(self):
output_file_name = 'e2e_test.csv'
output_folder = 'multi_classifier_dl/'
# data_url = "http://ckl-it.de/wp-content/uploads/2020/12/testset_w_refs.csv"
data_url = "http://ckl-it.de/wp-content/uploads/2020/12/e2e.csv"
return pd.read_csv(t.download_dataset(data_url,output_file_name,output_folder)).iloc[0:100]
# output_file_name = 'news_category_test.csv'
# output_folder = 'multi_classifier_dl/'
# data_dir = '../../../datasets/'
# data_url = "https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/classifier-dl/news_Category/news_category_test.csv"
# return t.download_dataset(data_url,output_file_name,output_folder,data_dir)
#
# def load_classifier_dl_dataset(self):
# #relative from tests/nlu_core_tests/training_tests/classifiers
# output_file_name = 'news_category_test.csv'
# output_folder = 'classifier_dl/'
# data_dir = t.create_dataset_dir_if_not_exist_and_get_path()
# t.create_path_if_not_exist(data_dir + output_file_name)
# data_url = "https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/classifier-dl/news_Category/news_category_test.csv"
# return t.download_dataset(data_url,output_file_name,output_folder,data_dir)
if __name__ == '__main__':
unittest.main()
| 41.433121
| 225
| 0.652267
| 833
| 6,505
| 4.801921
| 0.193277
| 0.11625
| 0.07225
| 0.032
| 0.851
| 0.836
| 0.82
| 0.77275
| 0.7585
| 0.7315
| 0
| 0.007834
| 0.215065
| 6,505
| 156
| 226
| 41.698718
| 0.775558
| 0.71837
| 0
| 0.142857
| 0
| 0.035714
| 0.065966
| 0.012843
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.178571
| 0
| 0.321429
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4603d3536529da1f700d39e4f9811b735a279b61
| 34
|
py
|
Python
|
Week10/Day5/sortingtry.py
|
malharlakdawala/DevelopersInstitute
|
3d6a9fb0002670878105d983edca432f635bce6d
|
[
"MIT"
] | null | null | null |
Week10/Day5/sortingtry.py
|
malharlakdawala/DevelopersInstitute
|
3d6a9fb0002670878105d983edca432f635bce6d
|
[
"MIT"
] | null | null | null |
Week10/Day5/sortingtry.py
|
malharlakdawala/DevelopersInstitute
|
3d6a9fb0002670878105d983edca432f635bce6d
|
[
"MIT"
] | 1
|
2021-10-09T19:01:08.000Z
|
2021-10-09T19:01:08.000Z
|
array = [6, 5, 12, 10, 9, 1]
| 4.857143
| 28
| 0.382353
| 7
| 34
| 1.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.380952
| 0.382353
| 34
| 6
| 29
| 5.666667
| 0.238095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|