hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e53e62043301ca1e5e6b8ceb952823fefff00fe8 | 93 | py | Python | src/Ngl.py | yang69can/pyngl | 78a7040ce9de4b7a442b0c3b5faecccab2f01426 | ["Apache-2.0"] | 125 | 2016-11-24T09:04:28.000Z | 2022-01-22T14:06:56.000Z | src/Ngl.py | yang69can/pyngl | 78a7040ce9de4b7a442b0c3b5faecccab2f01426 | ["Apache-2.0"] | 52 | 2017-11-08T23:23:02.000Z | 2022-03-20T03:17:39.000Z | src/Ngl.py | yang69can/pyngl | 78a7040ce9de4b7a442b0c3b5faecccab2f01426 | ["Apache-2.0"] | 25 | 2017-08-27T10:50:43.000Z | 2022-01-29T14:56:05.000Z | from ngl import __all__ as _ngl_all
from ngl import *
__all__ = []
__all__.extend(_ngl_all)
| 15.5 | 35 | 0.763441 | 15 | 93 | 3.666667 | 0.4 | 0.254545 | 0.472727 | 0.581818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16129 | 93 | 5 | 36 | 18.6 | 0.705128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
f92423f81dab78a122c0d37afc06e4c115b48450 | 113 | py | Python | Lesson 5 Miniflow/node_add.py | alchemz/Self-Driving-Car-Engineer-Nanodegree | 70d6ae9d741b6c53712e0099af04597dc0ba0291 | ["MIT"] | 1 | 2021-03-20T12:32:35.000Z | 2021-03-20T12:32:35.000Z | Lesson 5 Miniflow/node_add.py | alchemz/Self-Driving-Car-Engineer-Nanodegree | 70d6ae9d741b6c53712e0099af04597dc0ba0291 | ["MIT"] | null | null | null | Lesson 5 Miniflow/node_add.py | alchemz/Self-Driving-Car-Engineer-Nanodegree | 70d6ae9d741b6c53712e0099af04597dc0ba0291 | ["MIT"] | null | null | null | class Add(Node):
def __init__(self, x, y):
Node.__init__(self, [x, y])
def forward(self):
"""
quiz
""" | 14.125 | 29 | 0.575221 | 17 | 113 | 3.352941 | 0.588235 | 0.280702 | 0.315789 | 0.350877 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.212389 | 113 | 8 | 30 | 14.125 | 0.640449 | 0.035398 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
007493a89e2c278c298fed5b60d7e95906a21225 | 3,647 | py | Python | etl/parsers/etw/Microsoft_Windows_Hyper_V_Guest_Drivers_IcSvc.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | ["Apache-2.0"] | 104 | 2020-03-04T14:31:31.000Z | 2022-03-28T02:59:36.000Z | etl/parsers/etw/Microsoft_Windows_Hyper_V_Guest_Drivers_IcSvc.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | ["Apache-2.0"] | 7 | 2020-04-20T09:18:39.000Z | 2022-03-19T17:06:19.000Z | etl/parsers/etw/Microsoft_Windows_Hyper_V_Guest_Drivers_IcSvc.py | IMULMUL/etl-parser | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | ["Apache-2.0"] | 16 | 2020-03-05T18:55:59.000Z | 2022-03-01T10:19:28.000Z | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-Hyper-V-Guest-Drivers-IcSvc
GUID : c18672d1-dc18-4dfd-91e4-170cf37160cf
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("c18672d1-dc18-4dfd-91e4-170cf37160cf"), event_id=1, version=0)
class Microsoft_Windows_Hyper_V_Guest_Drivers_IcSvc_1_0(Etw):
pattern = Struct(
"Status" / Int32sl
)
@declare(guid=guid("c18672d1-dc18-4dfd-91e4-170cf37160cf"), event_id=2, version=0)
class Microsoft_Windows_Hyper_V_Guest_Drivers_IcSvc_2_0(Etw):
pattern = Struct(
"Status" / Int32sl
)
@declare(guid=guid("c18672d1-dc18-4dfd-91e4-170cf37160cf"), event_id=3, version=0)
class Microsoft_Windows_Hyper_V_Guest_Drivers_IcSvc_3_0(Etw):
pattern = Struct(
"Status" / Int32sl
)
@declare(guid=guid("c18672d1-dc18-4dfd-91e4-170cf37160cf"), event_id=4, version=0)
class Microsoft_Windows_Hyper_V_Guest_Drivers_IcSvc_4_0(Etw):
pattern = Struct(
"Status" / Int32sl
)
@declare(guid=guid("c18672d1-dc18-4dfd-91e4-170cf37160cf"), event_id=5, version=0)
class Microsoft_Windows_Hyper_V_Guest_Drivers_IcSvc_5_0(Etw):
pattern = Struct(
"Volume" / WString
)
@declare(guid=guid("c18672d1-dc18-4dfd-91e4-170cf37160cf"), event_id=22, version=0)
class Microsoft_Windows_Hyper_V_Guest_Drivers_IcSvc_22_0(Etw):
pattern = Struct(
"Status" / Int32sl
)
@declare(guid=guid("c18672d1-dc18-4dfd-91e4-170cf37160cf"), event_id=23, version=0)
class Microsoft_Windows_Hyper_V_Guest_Drivers_IcSvc_23_0(Etw):
pattern = Struct(
"Name" / WString,
"Writerstatus" / Int32sl,
"Status" / Int32sl
)
@declare(guid=guid("c18672d1-dc18-4dfd-91e4-170cf37160cf"), event_id=24, version=0)
class Microsoft_Windows_Hyper_V_Guest_Drivers_IcSvc_24_0(Etw):
pattern = Struct(
"DiskNumber" / Int32ul
)
@declare(guid=guid("c18672d1-dc18-4dfd-91e4-170cf37160cf"), event_id=3584, version=0)
class Microsoft_Windows_Hyper_V_Guest_Drivers_IcSvc_3584_0(Etw):
pattern = Struct(
"TraceData" / WString,
"VmName" / WString,
"VmId" / WString,
"StackFrameCount" / Int32ul,
"StackFrame" / Int64ul,
"ModuleCount" / Int32ul,
"Module" / Int32sl
)
@declare(guid=guid("c18672d1-dc18-4dfd-91e4-170cf37160cf"), event_id=3585, version=0)
class Microsoft_Windows_Hyper_V_Guest_Drivers_IcSvc_3585_0(Etw):
pattern = Struct(
"TraceData" / WString,
"VmName" / WString,
"VmId" / WString,
"StackFrameCount" / Int32ul,
"StackFrame" / Int64ul,
"ModuleCount" / Int32ul,
"Module" / Int32sl
)
@declare(guid=guid("c18672d1-dc18-4dfd-91e4-170cf37160cf"), event_id=3586, version=0)
class Microsoft_Windows_Hyper_V_Guest_Drivers_IcSvc_3586_0(Etw):
pattern = Struct(
"TraceData" / WString,
"VmName" / WString,
"VmId" / WString,
"StackFrameCount" / Int32ul,
"StackFrame" / Int64ul,
"ModuleCount" / Int32ul,
"Module" / Int32sl
)
@declare(guid=guid("c18672d1-dc18-4dfd-91e4-170cf37160cf"), event_id=3587, version=0)
class Microsoft_Windows_Hyper_V_Guest_Drivers_IcSvc_3587_0(Etw):
pattern = Struct(
"TraceData" / WString,
"VmName" / WString,
"VmId" / WString,
"StackFrameCount" / Int32ul,
"StackFrame" / Int64ul,
"ModuleCount" / Int32ul,
"Module" / Int32sl
)
| 30.140496 | 123 | 0.68714 | 441 | 3,647 | 5.437642 | 0.163265 | 0.086739 | 0.113845 | 0.119266 | 0.841535 | 0.841535 | 0.826522 | 0.810259 | 0.810259 | 0.810259 | 0 | 0.136287 | 0.189197 | 3,647 | 120 | 124 | 30.391667 | 0.67467 | 0.03071 | 0 | 0.511111 | 0 | 0 | 0.211004 | 0.122518 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.044444 | 0 | 0.311111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
0099349e667f0003ac13dfa64f66b847fb0bacf3 | 109 | py | Python | dtlutils/__init__.py | ed741/DTL | c1a4af39c8be8891f76a4dcc3834717ec484e720 | ["MIT"] | null | null | null | dtlutils/__init__.py | ed741/DTL | c1a4af39c8be8891f76a4dcc3834717ec484e720 | ["MIT"] | null | null | null | dtlutils/__init__.py | ed741/DTL | c1a4af39c8be8891f76a4dcc3834717ec484e720 | ["MIT"] | null | null | null | from dtlutils.names import NameGenerator # noqa: F401
from dtlutils.visualize import plot_dag # noqa: F401
| 36.333333 | 54 | 0.798165 | 15 | 109 | 5.733333 | 0.666667 | 0.27907 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.064516 | 0.146789 | 109 | 2 | 55 | 54.5 | 0.860215 | 0.192661 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
00cce7642706851953b5ea5986a053d817cd2e3e | 158 | py | Python | example/manage.py | viniciuschiele/flask-apidoc | d807a4c945b2c2d0c345ebdf356206f0fc60979c | ["MIT"] | 60 | 2015-10-26T11:17:01.000Z | 2021-08-09T03:08:13.000Z | example/manage.py | viniciuschiele/flask-apidoc | d807a4c945b2c2d0c345ebdf356206f0fc60979c | ["MIT"] | 13 | 2016-03-17T04:21:44.000Z | 2021-11-27T15:46:34.000Z | example/manage.py | viniciuschiele/flask-apidoc | d807a4c945b2c2d0c345ebdf356206f0fc60979c | ["MIT"] | 12 | 2016-03-17T03:36:18.000Z | 2021-11-03T10:25:20.000Z | from .views import app
from flask_apidoc.commands import GenerateApiDoc
app.cli.add_command(GenerateApiDoc(), "apidoc")
if __name__ == "__main__":
pass
| 19.75 | 48 | 0.765823 | 20 | 158 | 5.55 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.132911 | 158 | 7 | 49 | 22.571429 | 0.810219 | 0 | 0 | 0 | 1 | 0 | 0.088608 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.2 | 0.4 | 0 | 0.4 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 6 |
00d594819eeafd16f823a14c6bbd5b87f07b233e | 96 | py | Python | venv/lib/python3.8/site-packages/pip/_vendor/colorama/win32.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | ["MIT"] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/pip/_vendor/colorama/win32.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | ["MIT"] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/pip/_vendor/colorama/win32.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | ["MIT"] | null | null | null | /home/runner/.cache/pip/pool/6c/9f/08/97d8f0681379049f1b98de85a18675418b8c2afda3f1f1ab5e1ed3263c | 96 | 96 | 0.895833 | 9 | 96 | 9.555556 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.427083 | 0 | 96 | 1 | 96 | 96 | 0.46875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
00d5f52bd5f5f2722d5ad70607c28d9662000175 | 24 | py | Python | gFunctionDatabase/Data/__init__.py | j-c-cook/gFunctionDatabase | dec8080e3c16bcdb8cf6827a0ba2c9665eeee190 | ["BSD-3-Clause"] | 1 | 2021-03-13T11:23:49.000Z | 2021-03-13T11:23:49.000Z | gFunctionDatabase/Data/__init__.py | j-c-cook/gFunctionDatabase | dec8080e3c16bcdb8cf6827a0ba2c9665eeee190 | ["BSD-3-Clause"] | 20 | 2021-08-04T23:05:33.000Z | 2022-02-02T17:41:05.000Z | gFunctionDatabase/Data/__init__.py | j-c-cook/gFunctionDatabase | dec8080e3c16bcdb8cf6827a0ba2c9665eeee190 | ["BSD-3-Clause"] | 2 | 2021-02-08T18:18:26.000Z | 2021-04-10T02:56:07.000Z | from . import available
| 12 | 23 | 0.791667 | 3 | 24 | 6.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 24 | 1 | 24 | 24 | 0.95 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
00d8ef6595872df074957b98874d3bd89733f10b | 128 | py | Python | tabular/src/autogluon/tabular/__init__.py | mseeger/autogluon-1 | e8d82363ce07fd8e3087bcdd2d71c6f6bd8fd7a0 | ["Apache-2.0"] | 1 | 2021-03-18T23:35:55.000Z | 2021-03-18T23:35:55.000Z | tabular/src/autogluon/tabular/__init__.py | mseeger/autogluon-1 | e8d82363ce07fd8e3087bcdd2d71c6f6bd8fd7a0 | ["Apache-2.0"] | null | null | null | tabular/src/autogluon/tabular/__init__.py | mseeger/autogluon-1 | e8d82363ce07fd8e3087bcdd2d71c6f6bd8fd7a0 | ["Apache-2.0"] | null | null | null | import logging
from .task.tabular_prediction import *
logging.basicConfig(format='%(message)s') # just print message in logs
| 21.333333 | 71 | 0.773438 | 17 | 128 | 5.764706 | 0.823529 | 0.265306 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 128 | 5 | 72 | 25.6 | 0.875 | 0.203125 | 0 | 0 | 0 | 0 | 0.11 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
dab06028727e665b78afa34ef75bda200096b471 | 115 | py | Python | supercollider/exceptions.py | ideoforms/python-supercollider | 43b6cfdf41dea48830b767159865a3044a792151 | ["MIT"] | 48 | 2019-10-07T14:59:14.000Z | 2022-03-30T04:58:32.000Z | supercollider/exceptions.py | ideoforms/python-supercollider | 43b6cfdf41dea48830b767159865a3044a792151 | ["MIT"] | 11 | 2019-10-07T08:48:10.000Z | 2021-07-18T19:55:37.000Z | supercollider/exceptions.py | ideoforms/python-supercollider | 43b6cfdf41dea48830b767159865a3044a792151 | ["MIT"] | 2 | 2019-12-17T14:32:20.000Z | 2021-07-11T11:23:58.000Z | class SuperColliderConnectionError (Exception):
pass
class SuperColliderAllocationError (Exception):
pass
| 19.166667 | 47 | 0.8 | 8 | 115 | 11.5 | 0.625 | 0.282609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.147826 | 115 | 5 | 48 | 23 | 0.938776 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.5 | 0 | 0 | 0.5 | 0 | 1 | 0 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
dab30803c0887736c57cb5ff033e90f8bb0c3e72 | 27 | py | Python | src/staff_finder/prepare/__init__.py | m-alban/music_learner | 4d4f1835f676becb8fee5824ab54b90b43de8723 | ["MIT"] | 1 | 2021-04-22T04:23:38.000Z | 2021-04-22T04:23:38.000Z | src/staff_finder/prepare/__init__.py | m-alban/music_learner | 4d4f1835f676becb8fee5824ab54b90b43de8723 | ["MIT"] | null | null | null | src/staff_finder/prepare/__init__.py | m-alban/music_learner | 4d4f1835f676becb8fee5824ab54b90b43de8723 | ["MIT"] | null | null | null | from .data_loader import *
| 13.5 | 26 | 0.777778 | 4 | 27 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148148 | 27 | 1 | 27 | 27 | 0.869565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
daf9964b9c318aa7f905204dc4d53fd6d57af059 | 6,977 | py | Python | mm/optimize/depth.py | leon-nn/face-fitting | 239c0826f77aaba1c1c77f221f18d733967dfd63 | ["MIT"] | 18 | 2018-03-22T21:24:45.000Z | 2021-11-28T15:52:33.000Z | mm/optimize/depth.py | leon-nn/face-fitting | 239c0826f77aaba1c1c77f221f18d733967dfd63 | ["MIT"] | null | null | null | mm/optimize/depth.py | leon-nn/face-fitting | 239c0826f77aaba1c1c77f221f18d733967dfd63 | ["MIT"] | 3 | 2020-04-08T07:28:10.000Z | 2020-11-13T01:29:45.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This module contains functions to be used with the scipy.optimize package in order to fit the 3DMM to a target depth map.
"""
import numpy as np
from ..utils.mesh import generateFace
from ..utils.transform import rotMat2angle
from .derivative import dR_dpsi, dR_dtheta, dR_dphi
def initialShapeCost(param, target, model, w = (1, 1)):
# Shape eigenvector coefficients
idCoef = param[: model.numId]
expCoef = param[model.numId: model.numId + model.numExp]
# Landmark fitting cost
source = generateFace(param, model, ind = model.sourceLMInd)
rlan = (source - target.T).flatten('F')
Elan = np.dot(rlan, rlan) / model.sourceLMInd.size
# Regularization cost
Ereg = np.sum(idCoef ** 2 / model.idEval) + np.sum(expCoef ** 2 / model.expEval)
return w[0] * Elan + w[1] * Ereg
def initialShapeGrad(param, target, model, w = (1, 1)):
# Shape eigenvector coefficients
idCoef = param[: model.numId]
expCoef = param[model.numId: model.numId + model.numExp]
# Rotation Euler angles, translation vector, scaling factor
angles = param[model.numId + model.numExp:][:3]
R = rotMat2angle(angles)
t = param[model.numId + model.numExp:][3: 6]
s = param[model.numId + model.numExp:][6]
# The eigenmodel, before rigid transformation and scaling
shape = model.idMean[:, model.sourceLMInd] + np.tensordot(model.idEvec[:, model.sourceLMInd, :], idCoef, axes = 1) + np.tensordot(model.expEvec[:, model.sourceLMInd, :], expCoef, axes = 1)
# After rigid transformation and scaling
source = s*np.dot(R, shape) + t[:, np.newaxis]
rlan = (source - target.T).flatten('F')
drV_dalpha = s*np.tensordot(R, model.idEvec[:, model.sourceLMInd, :], axes = 1)
drV_ddelta = s*np.tensordot(R, model.expEvec[:, model.sourceLMInd, :], axes = 1)
drV_dpsi = s*np.dot(dR_dpsi(angles), shape)
drV_dtheta = s*np.dot(dR_dtheta(angles), shape)
drV_dphi = s*np.dot(dR_dphi(angles), shape)
drV_dt = np.tile(np.eye(3), [model.sourceLMInd.size, 1])
drV_ds = np.dot(R, shape)
Jlan = np.c_[drV_dalpha.reshape((source.size, idCoef.size), order = 'F'), drV_ddelta.reshape((source.size, expCoef.size), order = 'F'), drV_dpsi.flatten('F'), drV_dtheta.flatten('F'), drV_dphi.flatten('F'), drV_dt, drV_ds.flatten('F')]
return 2 * (w[0] * np.dot(Jlan.T, rlan) / model.sourceLMInd.size + w[1] * np.r_[idCoef / model.idEval, expCoef / model.expEval, np.zeros(7)])
def shapeCost(param, model, target, targetLandmarks, NN, w = (1, 1, 1), calcID = True):
# Shape eigenvector coefficients
idCoef = param[: model.numId]
expCoef = param[model.numId: model.numId + model.numExp]
# Transpose target if necessary
if targetLandmarks.shape[0] != 3:
targetLandmarks = targetLandmarks.T
# After rigid transformation and scaling
source = generateFace(param, model)
# Find the nearest neighbors of the target to the source vertices
distance, ind = NN.kneighbors(source.T)
targetNN = target[ind.squeeze(axis = 1), :].T
# Calculate resisduals
rver = (source - targetNN).flatten('F')
rlan = (source[:, model.sourceLMInd] - targetLandmarks).flatten('F')
# Calculate costs
Ever = np.dot(rver, rver) / model.numVertices
Elan = np.dot(rlan, rlan) / model.sourceLMInd.size
if calcID:
Ereg = np.sum(idCoef ** 2 / model.idEval) + np.sum(expCoef ** 2 / model.expEval)
else:
Ereg = np.sum(expCoef ** 2 / model.expEval)
return w[0] * Ever + w[1] * Elan + w[2] * Ereg
def shapeGrad(param, model, target, targetLandmarks, NN, w = (1, 1, 1), calcID = True):
# Shape eigenvector coefficients
idCoef = param[: model.numId]
expCoef = param[model.numId: model.numId + model.numExp]
# Rotation Euler angles, translation vector, scaling factor
angles = param[model.numId + model.numExp:][:3]
R = rotMat2angle(angles)
t = param[model.numId + model.numExp:][3: 6]
s = param[model.numId + model.numExp:][6]
# Transpose if necessary
if targetLandmarks.shape[0] != 3:
targetLandmarks = targetLandmarks.T
# The eigenmodel, before rigid transformation and scaling
shape = model.idMean + np.tensordot(model.idEvec, idCoef, axes = 1) + np.tensordot(model.expEvec, expCoef, axes = 1)
# After rigid transformation and scaling
source = s*np.dot(R, shape) + t[:, np.newaxis]
# Find the nearest neighbors of the target to the source vertices
distance, ind = NN.kneighbors(source.T)
targetNN = target[ind.squeeze(axis = 1), :].T
# Calculate resisduals
rver = (source - targetNN).flatten('F')
rlan = (source[:, model.sourceLMInd] - targetLandmarks).flatten('F')
drV_ddelta = s*np.tensordot(R, model.expEvec, axes = 1)
drV_dpsi = s*np.dot(dR_dpsi(angles), shape)
drV_dtheta = s*np.dot(dR_dtheta(angles), shape)
drV_dphi = s*np.dot(dR_dphi(angles), shape)
drV_dt = np.tile(np.eye(3), [model.numVertices, 1])
drV_ds = np.dot(R, shape)
if calcID:
drV_dalpha = s*np.tensordot(R, model.idEvec, axes = 1)
Jver = np.c_[drV_dalpha.reshape((source.size, idCoef.size), order = 'F'), drV_ddelta.reshape((source.size, expCoef.size), order = 'F'), drV_dpsi.flatten('F'), drV_dtheta.flatten('F'), drV_dphi.flatten('F'), drV_dt, drV_ds.flatten('F')]
Jlan = np.c_[drV_dalpha[:, model.sourceLMInd, :].reshape((targetLandmarks.size, idCoef.size), order = 'F'), drV_ddelta[:, model.sourceLMInd, :].reshape((targetLandmarks.size, expCoef.size), order = 'F'), drV_dpsi[:, model.sourceLMInd].flatten('F'), drV_dtheta[:, model.sourceLMInd].flatten('F'), drV_dphi[:, model.sourceLMInd].flatten('F'), drV_dt[:model.sourceLMInd.size * 3, :], drV_ds[:, model.sourceLMInd].flatten('F')]
return 2 * (w[0] * np.dot(Jver.T, rver) / model.numVertices + w[1] * np.dot(Jlan.T, rlan) / model.sourceLMInd.size + w[2] * np.r_[idCoef / model.idEval, expCoef / model.expEval, np.zeros(7)])
else:
Jver = np.c_[drV_ddelta.reshape((source.size, expCoef.size), order = 'F'), drV_dpsi.flatten('F'), drV_dtheta.flatten('F'), drV_dphi.flatten('F'), drV_dt, drV_ds.flatten('F')]
Jlan = np.c_[drV_ddelta[:, model.sourceLMInd, :].reshape((targetLandmarks.size, expCoef.size), order = 'F'), drV_dpsi[:, model.sourceLMInd].flatten('F'), drV_dtheta[:, model.sourceLMInd].flatten('F'), drV_dphi[:, model.sourceLMInd].flatten('F'), drV_dt[:model.sourceLMInd.size * 3, :], drV_ds[:, model.sourceLMInd].flatten('F')]
return 2 * (np.r_[np.zeros(idCoef.size), w[0] * np.dot(Jver.T, rver) / model.numVertices] + np.r_[np.zeros(idCoef.size), w[1] * np.dot(Jlan.T, rlan) / model.sourceLMInd.size] + w[2] * np.r_[np.zeros(idCoef.size), expCoef / model.expEval, np.zeros(7)]) | 48.451389 | 431 | 0.64727 | 958 | 6,977 | 4.645094 | 0.145094 | 0.097079 | 0.042022 | 0.044944 | 0.848539 | 0.829438 | 0.803146 | 0.762921 | 0.714157 | 0.682697 | 0 | 0.012117 | 0.195643 | 6,977 | 144 | 432 | 48.451389 | 0.780827 | 0.131145 | 0 | 0.615385 | 0 | 0 | 0.005634 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0.051282 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
974fe79158195b0758c5321e7dc917696da1f1a6 | 101 | py | Python | tests/test_main.py | nlbao/pocket_tools | dfd106903633633e55015b454129e8f8e7959c9e | ["MIT"] | 5 | 2020-07-09T09:15:54.000Z | 2022-01-04T07:28:27.000Z | tests/test_main.py | nlbao/pocket_tools | dfd106903633633e55015b454129e8f8e7959c9e | ["MIT"] | 22 | 2020-07-08T11:13:45.000Z | 2021-06-02T03:52:03.000Z | tests/test_main.py | nlbao/pocket_tools | dfd106903633633e55015b454129e8f8e7959c9e | ["MIT"] | 1 | 2020-09-24T21:17:26.000Z | 2020-09-24T21:17:26.000Z | import subprocess
def test_main():
subprocess.check_output('python3 pocket_stats', shell=True)
| 16.833333 | 63 | 0.772277 | 13 | 101 | 5.769231 | 0.923077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011364 | 0.128713 | 101 | 5 | 64 | 20.2 | 0.840909 | 0 | 0 | 0 | 0 | 0 | 0.19802 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
975c7ee2c17684e0d2d3ea034be627949de25f19 | 101 | py | Python | content_interactions_monitoring/handlers.py | aaboffill/django-content-interactions | 8ea881e46cc6d5c375542939bb69d2980efdec23 | ["BSD-3-Clause"] | null | null | null | content_interactions_monitoring/handlers.py | aaboffill/django-content-interactions | 8ea881e46cc6d5c375542939bb69d2980efdec23 | ["BSD-3-Clause"] | null | null | null | content_interactions_monitoring/handlers.py | aaboffill/django-content-interactions | 8ea881e46cc6d5c375542939bb69d2980efdec23 | ["BSD-3-Clause"] | null | null | null | # coding=utf-8
def visit_handler(instance, **kwargs):
return instance, kwargs.get('user', None) | 20.2 | 45 | 0.70297 | 14 | 101 | 5 | 0.857143 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011494 | 0.138614 | 101 | 5 | 45 | 20.2 | 0.793103 | 0.118812 | 0 | 0 | 0 | 0 | 0.045455 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
97894e826ee207231549ad67089317b9aea90429 | 22 | py | Python | datas_utils/env/__init__.py | iatlab/datas-utils | b8eef303de5a5d5a57182c0627b721dde0b6b300 | ["MIT"] | null | null | null | datas_utils/env/__init__.py | iatlab/datas-utils | b8eef303de5a5d5a57182c0627b721dde0b6b300 | ["MIT"] | null | null | null | datas_utils/env/__init__.py | iatlab/datas-utils | b8eef303de5a5d5a57182c0627b721dde0b6b300 | ["MIT"] | null | null | null | from .env import load
| 11 | 21 | 0.772727 | 4 | 22 | 4.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.181818 | 22 | 1 | 22 | 22 | 0.944444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
97b7a5044e9d7a21c4c809ccd334c690350f3a15 | 6,250 | py | Python | models.py | yyqqss09/ldct_denoising | 6cdf4d96cb879de62318c9c55c3b63fbc561220e | ["MIT"] | 18 | 2018-05-03T08:50:46.000Z | 2022-02-28T02:10:16.000Z | models.py | yyqqss09/ldct_denoising | 6cdf4d96cb879de62318c9c55c3b63fbc561220e | ["MIT"] | 7 | 2019-03-08T02:21:40.000Z | 2020-10-04T12:49:57.000Z | models.py | yyqqss09/ldct_denoising | 6cdf4d96cb879de62318c9c55c3b63fbc561220e | ["MIT"] | 5 | 2018-07-03T08:15:26.000Z | 2020-03-05T07:10:50.000Z | import tensorflow as tf
def leaky_relu(inputs, alpha):
return 0.5 * (1 + alpha) * inputs + 0.5 * (1-alpha) * tf.abs(inputs)
def cnn_model(inputs, padding='valid'):
outputs = tf.layers.conv2d(inputs, 32, 3, padding=padding, kernel_initializer=tf.contrib.layers.xavier_initializer(), name='conv1', use_bias=False)
outputs = tf.nn.relu(outputs)
outputs = tf.layers.conv2d(outputs, 32, 3, padding=padding, kernel_initializer=tf.contrib.layers.xavier_initializer(), name='conv2', use_bias=False)
outputs = tf.nn.relu(outputs)
outputs = tf.layers.conv2d(outputs, 32, 3, padding=padding, kernel_initializer=tf.contrib.layers.xavier_initializer(), name='conv3', use_bias=False)
outputs = tf.nn.relu(outputs)
outputs = tf.layers.conv2d(outputs, 32, 3, padding=padding, kernel_initializer=tf.contrib.layers.xavier_initializer(), name='conv4', use_bias=False)
outputs = tf.nn.relu(outputs)
outputs = tf.layers.conv2d(outputs, 32, 3, padding=padding, kernel_initializer=tf.contrib.layers.xavier_initializer(), name='conv5', use_bias=False)
outputs = tf.nn.relu(outputs)
outputs = tf.layers.conv2d(outputs, 32, 3, padding=padding, kernel_initializer=tf.contrib.layers.xavier_initializer(), name='conv6', use_bias=False)
outputs = tf.nn.relu(outputs)
outputs = tf.layers.conv2d(outputs, 32, 3, padding=padding, kernel_initializer=tf.contrib.layers.xavier_initializer(), name='conv7', use_bias=False)
outputs = tf.nn.relu(outputs)
outputs = tf.layers.conv2d(outputs, 1, 3, padding=padding, kernel_initializer=tf.contrib.layers.xavier_initializer(), name='conv8', use_bias=False)
outputs = tf.nn.relu(outputs)
return outputs
def discriminator_model(inputs):
outputs = tf.layers.conv2d(inputs, 64, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), name='conv1')
outputs = leaky_relu(outputs, alpha=0.2)
outputs = tf.layers.conv2d(outputs, 64, 3, padding='same', strides=(2,2), kernel_initializer=tf.contrib.layers.xavier_initializer(), name='conv2')
outputs = leaky_relu(outputs, alpha=0.2)
outputs = tf.layers.conv2d(outputs, 128, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), name='conv3')
outputs = leaky_relu(outputs, alpha=0.2)
outputs = tf.layers.conv2d(outputs, 128, 3, padding='same', strides=(2,2), kernel_initializer=tf.contrib.layers.xavier_initializer(), name='conv4')
outputs = leaky_relu(outputs, alpha=0.2)
outputs = tf.layers.conv2d(outputs, 256, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), name='conv5')
outputs = leaky_relu(outputs, alpha=0.2)
outputs = tf.layers.conv2d(outputs, 256, 3, padding='same', strides=(2,2), kernel_initializer=tf.contrib.layers.xavier_initializer(), name='conv6')
outputs = leaky_relu(outputs, alpha=0.2)
outputs = tf.contrib.layers.flatten(outputs)
outputs = tf.layers.dense(outputs, units=1024, name='dense1')
outputs = leaky_relu(outputs, alpha=0.2)
outputs = tf.layers.dense(outputs, units=1, name='dense2')
return outputs
def vgg_model(inputs):
outputs = tf.concat([inputs*255-103.939, inputs*255-116.779, inputs*255-123.68], 3)
outputs = tf.layers.conv2d(outputs, 64, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), activation=tf.nn.relu, name='conv1_1')
outputs = tf.layers.conv2d(outputs, 64, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), activation=tf.nn.relu, name='conv1_2')
outputs = tf.layers.max_pooling2d(outputs, 2, strides=(2,2), padding='same', name='pool1')
outputs = tf.layers.conv2d(outputs, 128, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), activation=tf.nn.relu, name='conv2_1')
outputs = tf.layers.conv2d(outputs, 128, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), activation=tf.nn.relu, name='conv2_2')
outputs = tf.layers.max_pooling2d(outputs, 2, strides=(2,2), padding='same', name='pool2')
outputs = tf.layers.conv2d(outputs, 256, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), activation=tf.nn.relu, name='conv3_1')
outputs = tf.layers.conv2d(outputs, 256, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), activation=tf.nn.relu, name='conv3_2')
outputs = tf.layers.conv2d(outputs, 256, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), activation=tf.nn.relu, name='conv3_3')
outputs = tf.layers.conv2d(outputs, 256, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), activation=tf.nn.relu, name='conv3_4')
outputs = tf.layers.max_pooling2d(outputs, 2, strides=(2,2), padding='same', name='pool3')
outputs = tf.layers.conv2d(outputs, 512, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), activation=tf.nn.relu, name='conv4_1')
outputs = tf.layers.conv2d(outputs, 512, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), activation=tf.nn.relu, name='conv4_2')
outputs = tf.layers.conv2d(outputs, 512, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), activation=tf.nn.relu, name='conv4_3')
outputs = tf.layers.conv2d(outputs, 512, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), activation=tf.nn.relu, name='conv4_4')
outputs = tf.layers.max_pooling2d(outputs, 2, strides=(2,2), padding='same', name='pool4')
outputs = tf.layers.conv2d(outputs, 512, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), activation=tf.nn.relu, name='conv5_1')
outputs = tf.layers.conv2d(outputs, 512, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), activation=tf.nn.relu, name='conv5_2')
outputs = tf.layers.conv2d(outputs, 512, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), activation=tf.nn.relu, name='conv5_3')
outputs = tf.layers.conv2d(outputs, 512, 3, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer(), activation=tf.nn.relu, name='conv5_4')
return outputs
| 71.022727 | 161 | 0.73376 | 880 | 6,250 | 5.098864 | 0.080682 | 0.092267 | 0.120348 | 0.140406 | 0.924894 | 0.912859 | 0.900825 | 0.900825 | 0.893247 | 0.870738 | 0 | 0.047285 | 0.11008 | 6,250 | 87 | 162 | 71.83908 | 0.759439 | 0 | 0 | 0.290323 | 0 | 0 | 0.051688 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.016129 | 0.016129 | 0.145161 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c172c2c69b11e37bcc77956be537c6222ff1e4bf | 213 | py | Python | jinjamator/plugins/content/mac/__init__.py | jinjamator/jinjamator | 6c48a6eedea9b9f461c66b5dddd609fa39610f0d | ["Apache-2.0"] | 7 | 2020-05-06T07:48:14.000Z | 2021-12-11T15:57:26.000Z | jinjamator/plugins/content/mac/__init__.py | jinjamator/jinjamator | 6c48a6eedea9b9f461c66b5dddd609fa39610f0d | ["Apache-2.0"] | 1 | 2020-04-11T15:13:07.000Z | 2020-04-27T20:01:34.000Z | jinjamator/plugins/content/mac/__init__.py | jinjamator/jinjamator | 6c48a6eedea9b9f461c66b5dddd609fa39610f0d | ["Apache-2.0"] | 1 | 2020-05-29T08:53:08.000Z | 2020-05-29T08:53:08.000Z | from netaddr import EUI, mac_unix_expanded
def to_unix(mac_address):
mac = EUI(mac_address)
mac.dialect = mac_unix_expanded
return str(mac)
def to_aci(mac_address):
return to_unix(mac_address)
| 17.75 | 42 | 0.741784 | 34 | 213 | 4.323529 | 0.411765 | 0.272109 | 0.204082 | 0.217687 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.183099 | 213 | 11 | 43 | 19.363636 | 0.844828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.285714 | false | 0 | 0.142857 | 0.142857 | 0.714286 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
c1a7e65b599b0bcacc63d0c732b9f720c96986ce | 110 | py | Python | thonny/plugins/micropython/api_stubs/ubinascii.py | shreyas202/thonny | ef894c359200b0591cf98451907243395b817c63 | ["MIT"] | 2 | 2020-02-13T06:41:07.000Z | 2022-02-14T09:28:02.000Z | Thonny/Lib/site-packages/thonny/plugins/micropython/api_stubs/ubinascii.py | Pydiderot/pydiderotIDE | a42fcde3ea837ae40c957469f5d87427e8ce46d3 | ["MIT"] | 30 | 2019-01-04T10:14:56.000Z | 2020-10-12T14:00:31.000Z | Thonny/Lib/site-packages/thonny/plugins/micropython/api_stubs/ubinascii.py | Pydiderot/pydiderotIDE | a42fcde3ea837ae40c957469f5d87427e8ce46d3 | ["MIT"] | 3 | 2018-11-24T14:00:30.000Z | 2019-07-02T02:32:26.000Z | def a2b_base64():
pass
def b2a_base64():
pass
def hexlify():
pass
def unhexlify():
pass
| 7.333333 | 17 | 0.581818 | 14 | 110 | 4.428571 | 0.5 | 0.33871 | 0.419355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.078947 | 0.309091 | 110 | 14 | 18 | 7.857143 | 0.736842 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0.5 | 0 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
de14bf2c6721f14bc5e948ac44225de6da0d16fa | 41 | py | Python | __init__.py | halabikeren/vir_to_host | cfe0e37b60c9c36413c24e945f1674f339f95515 | ["MIT"] | null | null | null | __init__.py | halabikeren/vir_to_host | cfe0e37b60c9c36413c24e945f1674f339f95515 | ["MIT"] | null | null | null | __init__.py | halabikeren/vir_to_host | cfe0e37b60c9c36413c24e945f1674f339f95515 | ["MIT"] | null | null | null | from .utils import data_collecting_utils
| 20.5 | 40 | 0.878049 | 6 | 41 | 5.666667 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.097561 | 41 | 1 | 41 | 41 | 0.918919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
a9b94a26c5c7481e51c231ec9a30491697d8af3a | 23 | py | Python | src/masonite/hashing/__init__.py | cercos/masonite | f7f220efa7fae833683e9f07ce13c3795a87d3b8 | ["MIT"] | 1,816 | 2018-02-14T01:59:51.000Z | 2022-03-31T17:09:20.000Z | src/masonite/hashing/__init__.py | cercos/masonite | f7f220efa7fae833683e9f07ce13c3795a87d3b8 | ["MIT"] | 340 | 2018-02-11T00:27:26.000Z | 2022-03-21T12:00:24.000Z | src/masonite/hashing/__init__.py | cercos/masonite | f7f220efa7fae833683e9f07ce13c3795a87d3b8 | ["MIT"] | 144 | 2018-03-18T00:08:16.000Z | 2022-02-26T01:51:58.000Z | from .Hash import Hash
| 11.5 | 22 | 0.782609 | 4 | 23 | 4.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 23 | 1 | 23 | 23 | 0.947368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e752d5f1c8fc6e7a956a859288192d132fdfc2b6 | 95,573 | py | Python | src/sensory_cloud/generated/v1/audio/audio_pb2.py | Sensory-Cloud/python-sdk | 1457987481a7fbddaa6dff6b5b935c1a2c0d7213 | ["Apache-2.0"] | 2 | 2022-01-11T21:49:33.000Z | 2022-02-15T23:53:41.000Z | src/sensory_cloud/generated/v1/audio/audio_pb2.py | Sensory-Cloud/python-sdk | 1457987481a7fbddaa6dff6b5b935c1a2c0d7213 | ["Apache-2.0"] | null | null | null | src/sensory_cloud/generated/v1/audio/audio_pb2.py | Sensory-Cloud/python-sdk | 1457987481a7fbddaa6dff6b5b935c1a2c0d7213 | ["Apache-2.0"] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v1/audio/audio.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from sensory_cloud.generated.validate import validate_pb2 as validate_dot_validate__pb2
from sensory_cloud.generated.common import common_pb2 as common_dot_common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='v1/audio/audio.proto',
package='sensory.api.v1.audio',
syntax='proto3',
serialized_options=b'\n\034ai.sensorycloud.api.v1.audioB\026SensoryApiV1AudioProtoP\001Z:gitlab.com/sensory-cloud/server/titan.git/pkg/api/v1/audio',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x14v1/audio/audio.proto\x12\x14sensory.api.v1.audio\x1a\x17validate/validate.proto\x1a\x13\x63ommon/common.proto\"\x12\n\x10GetModelsRequest\"\xf2\x01\n\nAudioModel\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0cisEnrollable\x18\x02 \x01(\x08\x12\x30\n\tmodelType\x18\x03 \x01(\x0e\x32\x1d.sensory.api.common.ModelType\x12\x13\n\x0b\x66ixedPhrase\x18\x04 \x01(\t\x12\x12\n\nsampleRate\x18\x05 \x01(\x05\x12\x10\n\x08versions\x18\x06 \x03(\t\x12\x36\n\ntechnology\x18\x07 \x01(\x0e\x32\".sensory.api.common.TechnologyType\x12\x1b\n\x13isLivenessSupported\x18\x08 \x01(\x08\"\x7f\n AudioRequestPostProcessingAction\x12\x10\n\x08\x61\x63tionId\x18\x01 \x01(\t\x12I\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32/.sensory.api.v1.audio.AudioPostProcessingActionB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\"\x80\x01\n!AudioResponsePostProcessingAction\x12\x10\n\x08\x61\x63tionId\x18\x01 \x01(\t\x12I\n\x06\x61\x63tion\x18\x02 \x01(\x0e\x32/.sensory.api.v1.audio.AudioPostProcessingActionB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\"E\n\x11GetModelsResponse\x12\x30\n\x06models\x18\x01 \x03(\x0b\x32 .sensory.api.v1.audio.AudioModel\"\x8a\x01\n\x17\x43reateEnrollmentRequest\x12>\n\x06\x63onfig\x18\x01 \x01(\x0b\x32,.sensory.api.v1.audio.CreateEnrollmentConfigH\x00\x12\x16\n\x0c\x61udioContent\x18\x02 \x01(\x0cH\x00\x42\x17\n\x10streamingRequest\x12\x03\xf8\x42\x01\"\x82\x01\n\x13\x41uthenticateRequest\x12:\n\x06\x63onfig\x18\x01 \x01(\x0b\x32(.sensory.api.v1.audio.AuthenticateConfigH\x00\x12\x16\n\x0c\x61udioContent\x18\x02 \x01(\x0cH\x00\x42\x17\n\x10streamingRequest\x12\x03\xf8\x42\x01\"\xda\x01\n\x14ValidateEventRequest\x12;\n\x06\x63onfig\x18\x01 \x01(\x0b\x32).sensory.api.v1.audio.ValidateEventConfigH\x00\x12\x16\n\x0c\x61udioContent\x18\x02 \x01(\x0cH\x00\x12T\n\x14postProcessingAction\x18\n \x01(\x0b\x32\x36.sensory.api.v1.audio.AudioRequestPostProcessingActionB\x17\n\x10streamingRequest\x12\x03\xf8\x42\x01\"\x92\x01\n\x1a\x43reateEnrolledEventRequest\x12\x43\n\x06\x63onfig\x18\x01 \x01(\x0b\x32\x31.sensory.api.v1.audio.CreateEnrollmentEventConfigH\x00\x12\x16\n\x0c\x61udioContent\x18\x02 \x01(\x0cH\x00\x42\x17\n\x10streamingRequest\x12\x03\xf8\x42\x01\"\x94\x01\n\x1cValidateEnrolledEventRequest\x12\x43\n\x06\x63onfig\x18\x01 \x01(\x0b\x32\x31.sensory.api.v1.audio.ValidateEnrolledEventConfigH\x00\x12\x16\n\x0c\x61udioContent\x18\x02 \x01(\x0cH\x00\x42\x17\n\x10streamingRequest\x12\x03\xf8\x42\x01\"\xd4\x01\n\x11TranscribeRequest\x12\x38\n\x06\x63onfig\x18\x01 \x01(\x0b\x32&.sensory.api.v1.audio.TranscribeConfigH\x00\x12\x16\n\x0c\x61udioContent\x18\x02 \x01(\x0cH\x00\x12T\n\x14postProcessingAction\x18\n \x01(\x0b\x32\x36.sensory.api.v1.audio.AudioRequestPostProcessingActionB\x17\n\x10streamingRequest\x12\x03\xf8\x42\x01\"\xbc\x01\n\x18\x43reateEnrollmentResponse\x12\x17\n\x0fpercentComplete\x18\x01 \x01(\x03\x12\x13\n\x0b\x61udioEnergy\x18\x02 \x01(\x02\x12\x14\n\x0c\x65nrollmentId\x18\x03 \x01(\t\x12\x11\n\tmodelName\x18\x04 \x01(\t\x12\x14\n\x0cmodelVersion\x18\x05 \x01(\t\x12\x13\n\x0bmodelPrompt\x18\x06 \x01(\t\x12\x1e\n\x16percentSegmentComplete\x18\x07 \x01(\x03\"\xc9\x01\n\x14\x41uthenticateResponse\x12\x13\n\x0b\x61udioEnergy\x18\x01 \x01(\x02\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x30\n\x05token\x18\x03 \x01(\x0b\x32!.sensory.api.common.TokenResponse\x12\x0e\n\x06userId\x18\x04 \x01(\t\x12\x14\n\x0c\x65nrollmentId\x18\x05 \x01(\t\x12\x13\n\x0bmodelPrompt\x18\x06 \x01(\t\x12\x1e\n\x16percentSegmentComplete\x18\x07 \x01(\x03\"\xb5\x01\n\x15ValidateEventResponse\x12\x13\n\x0b\x61udioEnergy\x18\x01 \x01(\x02\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x10\n\x08resultId\x18\x03 \x01(\t\x12\r\n\x05score\x18\x04 \x01(\x02\x12U\n\x14postProcessingAction\x18\n \x01(\x0b\x32\x37.sensory.api.v1.audio.AudioResponsePostProcessingAction\"\x80\x01\n\x1dValidateEnrolledEventResponse\x12\x13\n\x0b\x61udioEnergy\x18\x01 \x01(\x02\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x14\n\x0c\x65nrollmentId\x18\x03 \x01(\t\x12\x0e\n\x06userId\x18\x04 \x01(\t\x12\x13\n\x0bmodelPrompt\x18\x05 \x01(\t\"\xad\x01\n\x12TranscribeResponse\x12\x13\n\x0b\x61udioEnergy\x18\x01 \x01(\x02\x12\x12\n\ntranscript\x18\x02 \x01(\t\x12\x17\n\x0fisPartialResult\x18\x03 \x01(\x08\x12U\n\x14postProcessingAction\x18\n \x01(\x0b\x32\x37.sensory.api.v1.audio.AudioResponsePostProcessingAction\"\xf0\x02\n\x16\x43reateEnrollmentConfig\x12:\n\x05\x61udio\x18\x01 \x01(\x0b\x32!.sensory.api.v1.audio.AudioConfigB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x19\n\x06userId\x18\x02 \x01(\tB\t\xfa\x42\x06r\x04\x10\x01\x18\x7f\x12\x1b\n\x08\x64\x65viceId\x18\x03 \x01(\tB\t\xfa\x42\x06r\x04\x10\x01\x18\x7f\x12\x1d\n\tmodelName\x18\x04 \x01(\tB\n\xfa\x42\x07r\x05\x10\x01\x18\xff\x01\x12\x1d\n\x0b\x64\x65scription\x18\x05 \x01(\tB\x08\xfa\x42\x05r\x03\x18\xff\x07\x12\x19\n\x11isLivenessEnabled\x18\x06 \x01(\x08\x12,\n\x17\x65nrollmentNumUtterances\x18\x07 \x01(\rB\t\xfa\x42\x06*\x04\x18\n(\x00H\x00\x12-\n\x12\x65nrollmentDuration\x18\x08 \x01(\x02\x42\x0f\xfa\x42\x0c\n\n\x1d\x00\x00pA-\x00\x00\x00\x00H\x00\x12\x1c\n\x0breferenceId\x18\t \x01(\tB\x07\xfa\x42\x04r\x02\x18\x7f\x42\x0e\n\x0c\x65nrollLength\"\x9c\x03\n\x12\x41uthenticateConfig\x12:\n\x05\x61udio\x18\x01 \x01(\x0b\x32!.sensory.api.v1.audio.AudioConfigB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12 \n\x0c\x65nrollmentId\x18\x02 \x01(\tB\x08\xfa\x42\x05r\x03\xb0\x01\x01H\x00\x12\x1b\n\x11\x65nrollmentGroupId\x18\x03 \x01(\tH\x00\x12\x16\n\x0e\x64oIncludeToken\x18\x04 \x01(\x08\x12I\n\x0bsensitivity\x18\x05 \x01(\x0e\x32*.sensory.api.v1.audio.ThresholdSensitivityB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\x12V\n\x08security\x18\x06 \x01(\x0e\x32:.sensory.api.v1.audio.AuthenticateConfig.ThresholdSecurityB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\x12\x19\n\x11isLivenessEnabled\x18\x07 \x01(\x08\"&\n\x11ThresholdSecurity\x12\x08\n\x04HIGH\x10\x00\x12\x07\n\x03LOW\x10\x01\x42\r\n\x06\x61uthId\x12\x03\xf8\x42\x01\"\xd6\x01\n\x13ValidateEventConfig\x12:\n\x05\x61udio\x18\x01 \x01(\x0b\x32!.sensory.api.v1.audio.AudioConfigB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x1d\n\tmodelName\x18\x02 \x01(\tB\n\xfa\x42\x07r\x05\x10\x01\x18\xff\x01\x12\x19\n\x06userId\x18\x03 \x01(\tB\t\xfa\x42\x06r\x04\x10\x01\x18\x7f\x12I\n\x0bsensitivity\x18\x04 \x01(\x0e\x32*.sensory.api.v1.audio.ThresholdSensitivityB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\"\xbd\x02\n\x1b\x43reateEnrollmentEventConfig\x12:\n\x05\x61udio\x18\x01 \x01(\x0b\x32!.sensory.api.v1.audio.AudioConfigB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x19\n\x06userId\x18\x02 \x01(\tB\t\xfa\x42\x06r\x04\x10\x01\x18\x7f\x12\x1d\n\tmodelName\x18\x03 \x01(\tB\n\xfa\x42\x07r\x05\x10\x01\x18\xff\x01\x12\x1d\n\x0b\x64\x65scription\x18\x04 \x01(\tB\x08\xfa\x42\x05r\x03\x18\xff\x07\x12,\n\x17\x65nrollmentNumUtterances\x18\x05 \x01(\rB\t\xfa\x42\x06*\x04\x18\n(\x00H\x00\x12-\n\x12\x65nrollmentDuration\x18\x06 \x01(\x02\x42\x0f\xfa\x42\x0c\n\n\x1d\x00\x00pA-\x00\x00\x00\x00H\x00\x12\x1c\n\x0breferenceId\x18\x07 \x01(\tB\x07\xfa\x42\x04r\x02\x18\x7f\x42\x0e\n\x0c\x65nrollLength\"\xf2\x01\n\x1bValidateEnrolledEventConfig\x12:\n\x05\x61udio\x18\x01 \x01(\x0b\x32!.sensory.api.v1.audio.AudioConfigB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12 \n\x0c\x65nrollmentId\x18\x02 \x01(\tB\x08\xfa\x42\x05r\x03\xb0\x01\x01H\x00\x12\x1b\n\x11\x65nrollmentGroupId\x18\x03 \x01(\tH\x00\x12I\n\x0bsensitivity\x18\x04 \x01(\x0e\x32*.sensory.api.v1.audio.ThresholdSensitivityB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\x42\r\n\x06\x61uthId\x12\x03\xf8\x42\x01\"\x88\x01\n\x10TranscribeConfig\x12:\n\x05\x61udio\x18\x01 \x01(\x0b\x32!.sensory.api.v1.audio.AudioConfigB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01\x12\x1d\n\tmodelName\x18\x02 \x01(\tB\n\xfa\x42\x07r\x05\x10\x01\x18\xff\x01\x12\x19\n\x06userId\x18\x03 \x01(\tB\t\xfa\x42\x06r\x04\x10\x01\x18\x7f\"\xeb\x01\n\x0b\x41udioConfig\x12K\n\x08\x65ncoding\x18\x01 \x01(\x0e\x32/.sensory.api.v1.audio.AudioConfig.AudioEncodingB\x08\xfa\x42\x05\x82\x01\x02\x10\x01\x12!\n\x0fsampleRateHertz\x18\x02 \x01(\x05\x42\x08\xfa\x42\x05\x1a\x03 \xc0>\x12\"\n\x11\x61udioChannelCount\x18\x03 \x01(\x05\x42\x07\xfa\x42\x04\x1a\x02 \x00\x12\x14\n\x0clanguageCode\x18\x04 \x01(\t\"2\n\rAudioEncoding\x12\x0c\n\x08LINEAR16\x10\x00\x12\x08\n\x04\x46LAC\x10\x01\x12\t\n\x05MULAW\x10\x02*>\n\x19\x41udioPostProcessingAction\x12\x0b\n\x07NOT_SET\x10\x00\x12\t\n\x05\x46LUSH\x10\x01\x12\t\n\x05RESET\x10\x02*N\n\x14ThresholdSensitivity\x12\n\n\x06LOWEST\x10\x00\x12\x07\n\x03LOW\x10\x01\x12\n\n\x06MEDIUM\x10\x02\x12\x08\n\x04HIGH\x10\x03\x12\x0b\n\x07HIGHEST\x10\x04\x32m\n\x0b\x41udioModels\x12^\n\tGetModels\x12&.sensory.api.v1.audio.GetModelsRequest\x1a\'.sensory.api.v1.audio.GetModelsResponse\"\x00\x32\xf7\x01\n\x0f\x41udioBiometrics\x12w\n\x10\x43reateEnrollment\x12-.sensory.api.v1.audio.CreateEnrollmentRequest\x1a..sensory.api.v1.audio.CreateEnrollmentResponse\"\x00(\x01\x30\x01\x12k\n\x0c\x41uthenticate\x12).sensory.api.v1.audio.AuthenticateRequest\x1a*.sensory.api.v1.audio.AuthenticateResponse\"\x00(\x01\x30\x01\x32\x85\x03\n\x0b\x41udioEvents\x12n\n\rValidateEvent\x12*.sensory.api.v1.audio.ValidateEventRequest\x1a+.sensory.api.v1.audio.ValidateEventResponse\"\x00(\x01\x30\x01\x12}\n\x13\x43reateEnrolledEvent\x12\x30.sensory.api.v1.audio.CreateEnrolledEventRequest\x1a..sensory.api.v1.audio.CreateEnrollmentResponse\"\x00(\x01\x30\x01\x12\x86\x01\n\x15ValidateEnrolledEvent\x12\x32.sensory.api.v1.audio.ValidateEnrolledEventRequest\x1a\x33.sensory.api.v1.audio.ValidateEnrolledEventResponse\"\x00(\x01\x30\x01\x32|\n\x13\x41udioTranscriptions\x12\x65\n\nTranscribe\x12\'.sensory.api.v1.audio.TranscribeRequest\x1a(.sensory.api.v1.audio.TranscribeResponse\"\x00(\x01\x30\x01\x42t\n\x1c\x61i.sensorycloud.api.v1.audioB\x16SensoryApiV1AudioProtoP\x01Z:gitlab.com/sensory-cloud/server/titan.git/pkg/api/v1/audiob\x06proto3'
,
dependencies=[validate_dot_validate__pb2.DESCRIPTOR,common_dot_common__pb2.DESCRIPTOR,])
_AUDIOPOSTPROCESSINGACTION = _descriptor.EnumDescriptor(
name='AudioPostProcessingAction',
full_name='sensory.api.v1.audio.AudioPostProcessingAction',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='NOT_SET', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FLUSH', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RESET', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=4529,
serialized_end=4591,
)
_sym_db.RegisterEnumDescriptor(_AUDIOPOSTPROCESSINGACTION)
AudioPostProcessingAction = enum_type_wrapper.EnumTypeWrapper(_AUDIOPOSTPROCESSINGACTION)
_THRESHOLDSENSITIVITY = _descriptor.EnumDescriptor(
name='ThresholdSensitivity',
full_name='sensory.api.v1.audio.ThresholdSensitivity',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='LOWEST', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='LOW', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MEDIUM', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='HIGH', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='HIGHEST', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=4593,
serialized_end=4671,
)
_sym_db.RegisterEnumDescriptor(_THRESHOLDSENSITIVITY)
ThresholdSensitivity = enum_type_wrapper.EnumTypeWrapper(_THRESHOLDSENSITIVITY)
NOT_SET = 0
FLUSH = 1
RESET = 2
LOWEST = 0
LOW = 1
MEDIUM = 2
HIGH = 3
HIGHEST = 4
_AUTHENTICATECONFIG_THRESHOLDSECURITY = _descriptor.EnumDescriptor(
name='ThresholdSecurity',
full_name='sensory.api.v1.audio.AuthenticateConfig.ThresholdSecurity',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='HIGH', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='LOW', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=3315,
serialized_end=3353,
)
_sym_db.RegisterEnumDescriptor(_AUTHENTICATECONFIG_THRESHOLDSECURITY)
_AUDIOCONFIG_AUDIOENCODING = _descriptor.EnumDescriptor(
name='AudioEncoding',
full_name='sensory.api.v1.audio.AudioConfig.AudioEncoding',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='LINEAR16', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FLAC', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MULAW', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=4477,
serialized_end=4527,
)
_sym_db.RegisterEnumDescriptor(_AUDIOCONFIG_AUDIOENCODING)
_GETMODELSREQUEST = _descriptor.Descriptor(
name='GetModelsRequest',
full_name='sensory.api.v1.audio.GetModelsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=92,
serialized_end=110,
)
_AUDIOMODEL = _descriptor.Descriptor(
name='AudioModel',
full_name='sensory.api.v1.audio.AudioModel',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='sensory.api.v1.audio.AudioModel.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='isEnrollable', full_name='sensory.api.v1.audio.AudioModel.isEnrollable', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='modelType', full_name='sensory.api.v1.audio.AudioModel.modelType', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='fixedPhrase', full_name='sensory.api.v1.audio.AudioModel.fixedPhrase', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sampleRate', full_name='sensory.api.v1.audio.AudioModel.sampleRate', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='versions', full_name='sensory.api.v1.audio.AudioModel.versions', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='technology', full_name='sensory.api.v1.audio.AudioModel.technology', index=6,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='isLivenessSupported', full_name='sensory.api.v1.audio.AudioModel.isLivenessSupported', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=113,
serialized_end=355,
)
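# NOTE: the numeric type/label codes in the FieldDescriptors map to constants
# on google.protobuf.descriptor.FieldDescriptor: type=9 TYPE_STRING, type=8
# TYPE_BOOL, type=14 TYPE_ENUM, type=5 TYPE_INT32, type=12 TYPE_BYTES, type=11
# TYPE_MESSAGE, type=2 TYPE_FLOAT, type=3 TYPE_INT64, type=13 TYPE_UINT32;
# label=1 is LABEL_OPTIONAL and label=3 LABEL_REPEATED (so AudioModel.versions
# is a repeated string field).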
_AUDIOREQUESTPOSTPROCESSINGACTION = _descriptor.Descriptor(
name='AudioRequestPostProcessingAction',
full_name='sensory.api.v1.audio.AudioRequestPostProcessingAction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='actionId', full_name='sensory.api.v1.audio.AudioRequestPostProcessingAction.actionId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='action', full_name='sensory.api.v1.audio.AudioRequestPostProcessingAction.action', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\202\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=357,
serialized_end=484,
)
_AUDIORESPONSEPOSTPROCESSINGACTION = _descriptor.Descriptor(
name='AudioResponsePostProcessingAction',
full_name='sensory.api.v1.audio.AudioResponsePostProcessingAction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='actionId', full_name='sensory.api.v1.audio.AudioResponsePostProcessingAction.actionId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='action', full_name='sensory.api.v1.audio.AudioResponsePostProcessingAction.action', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\202\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=487,
serialized_end=615,
)
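# NOTE: the non-None serialized_options byte strings on fields in this file
# (e.g. b'\372B\005\202\001\002\020\001' on 'action' above) look like
# protoc-gen-validate (validate.rules) options: 0xFA 0x42 is the varint tag for
# extension field 1071, wire type 2, and this payload reads as enum rules with
# defined_only: true, i.e. 'action' must be a declared enum value. This is a
# best-effort decoding of the raw bytes, not authoritative documentation.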
_GETMODELSRESPONSE = _descriptor.Descriptor(
name='GetModelsResponse',
full_name='sensory.api.v1.audio.GetModelsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='models', full_name='sensory.api.v1.audio.GetModelsResponse.models', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=617,
serialized_end=686,
)
_CREATEENROLLMENTREQUEST = _descriptor.Descriptor(
name='CreateEnrollmentRequest',
full_name='sensory.api.v1.audio.CreateEnrollmentRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='config', full_name='sensory.api.v1.audio.CreateEnrollmentRequest.config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='audioContent', full_name='sensory.api.v1.audio.CreateEnrollmentRequest.audioContent', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='streamingRequest', full_name='sensory.api.v1.audio.CreateEnrollmentRequest.streamingRequest',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[], serialized_options=b'\370B\001'),
],
serialized_start=689,
serialized_end=827,
)
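# NOTE: streamingRequest is a oneof over (config, audioContent) -- the usual
# gRPC client-streaming shape where the first message on the stream carries the
# config and every later message carries a chunk of audio bytes. A minimal
# sketch; the stub name is hypothetical, only the message/field names come
# from this file:
#
#   def enrollment_requests(cfg, chunks):
#       yield CreateEnrollmentRequest(config=cfg)
#       for chunk in chunks:                    # chunk: bytes of encoded audio
#           yield CreateEnrollmentRequest(audioContent=chunk)
#
#   # stub = audio_pb2_grpc.AudioBiometricsStub(channel)  # hypothetical stub
#   # for resp in stub.CreateEnrollment(enrollment_requests(cfg, chunks)):
#   #     print(resp.percentComplete)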
_AUTHENTICATEREQUEST = _descriptor.Descriptor(
name='AuthenticateRequest',
full_name='sensory.api.v1.audio.AuthenticateRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='config', full_name='sensory.api.v1.audio.AuthenticateRequest.config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='audioContent', full_name='sensory.api.v1.audio.AuthenticateRequest.audioContent', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='streamingRequest', full_name='sensory.api.v1.audio.AuthenticateRequest.streamingRequest',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[], serialized_options=b'\370B\001'),
],
serialized_start=830,
serialized_end=960,
)
_VALIDATEEVENTREQUEST = _descriptor.Descriptor(
name='ValidateEventRequest',
full_name='sensory.api.v1.audio.ValidateEventRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='config', full_name='sensory.api.v1.audio.ValidateEventRequest.config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='audioContent', full_name='sensory.api.v1.audio.ValidateEventRequest.audioContent', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='postProcessingAction', full_name='sensory.api.v1.audio.ValidateEventRequest.postProcessingAction', index=2,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='streamingRequest', full_name='sensory.api.v1.audio.ValidateEventRequest.streamingRequest',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[], serialized_options=b'\370B\001'),
],
serialized_start=963,
serialized_end=1181,
)
_CREATEENROLLEDEVENTREQUEST = _descriptor.Descriptor(
name='CreateEnrolledEventRequest',
full_name='sensory.api.v1.audio.CreateEnrolledEventRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='config', full_name='sensory.api.v1.audio.CreateEnrolledEventRequest.config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='audioContent', full_name='sensory.api.v1.audio.CreateEnrolledEventRequest.audioContent', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='streamingRequest', full_name='sensory.api.v1.audio.CreateEnrolledEventRequest.streamingRequest',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[], serialized_options=b'\370B\001'),
],
serialized_start=1184,
serialized_end=1330,
)
_VALIDATEENROLLEDEVENTREQUEST = _descriptor.Descriptor(
name='ValidateEnrolledEventRequest',
full_name='sensory.api.v1.audio.ValidateEnrolledEventRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='config', full_name='sensory.api.v1.audio.ValidateEnrolledEventRequest.config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='audioContent', full_name='sensory.api.v1.audio.ValidateEnrolledEventRequest.audioContent', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='streamingRequest', full_name='sensory.api.v1.audio.ValidateEnrolledEventRequest.streamingRequest',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[], serialized_options=b'\370B\001'),
],
serialized_start=1333,
serialized_end=1481,
)
_TRANSCRIBEREQUEST = _descriptor.Descriptor(
name='TranscribeRequest',
full_name='sensory.api.v1.audio.TranscribeRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='config', full_name='sensory.api.v1.audio.TranscribeRequest.config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='audioContent', full_name='sensory.api.v1.audio.TranscribeRequest.audioContent', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='postProcessingAction', full_name='sensory.api.v1.audio.TranscribeRequest.postProcessingAction', index=2,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='streamingRequest', full_name='sensory.api.v1.audio.TranscribeRequest.streamingRequest',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[], serialized_options=b'\370B\001'),
],
serialized_start=1484,
serialized_end=1696,
)
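# NOTE: postProcessingAction sits at field number 10 while config/audioContent
# use 1 and 2; the gap leaves numbers 3-9 free for future streaming-payload
# fields without renumbering (an observation about the numbering, not a stated
# guarantee).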
_CREATEENROLLMENTRESPONSE = _descriptor.Descriptor(
name='CreateEnrollmentResponse',
full_name='sensory.api.v1.audio.CreateEnrollmentResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='percentComplete', full_name='sensory.api.v1.audio.CreateEnrollmentResponse.percentComplete', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='audioEnergy', full_name='sensory.api.v1.audio.CreateEnrollmentResponse.audioEnergy', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enrollmentId', full_name='sensory.api.v1.audio.CreateEnrollmentResponse.enrollmentId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='modelName', full_name='sensory.api.v1.audio.CreateEnrollmentResponse.modelName', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='modelVersion', full_name='sensory.api.v1.audio.CreateEnrollmentResponse.modelVersion', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='modelPrompt', full_name='sensory.api.v1.audio.CreateEnrollmentResponse.modelPrompt', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='percentSegmentComplete', full_name='sensory.api.v1.audio.CreateEnrollmentResponse.percentSegmentComplete', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1699,
serialized_end=1887,
)
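# NOTE: has_default_value=False with zero-valued defaults (0, 0.0, "", False)
# is standard proto3: scalar fields have implicit defaults and no presence
# tracking, so an unset percentComplete is indistinguishable from 0 on the
# wire.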
_AUTHENTICATERESPONSE = _descriptor.Descriptor(
name='AuthenticateResponse',
full_name='sensory.api.v1.audio.AuthenticateResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='audioEnergy', full_name='sensory.api.v1.audio.AuthenticateResponse.audioEnergy', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='success', full_name='sensory.api.v1.audio.AuthenticateResponse.success', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='token', full_name='sensory.api.v1.audio.AuthenticateResponse.token', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='userId', full_name='sensory.api.v1.audio.AuthenticateResponse.userId', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enrollmentId', full_name='sensory.api.v1.audio.AuthenticateResponse.enrollmentId', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='modelPrompt', full_name='sensory.api.v1.audio.AuthenticateResponse.modelPrompt', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='percentSegmentComplete', full_name='sensory.api.v1.audio.AuthenticateResponse.percentSegmentComplete', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1890,
serialized_end=2091,
)
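# NOTE: 'token' is declared with message_type=None and wired to
# common_dot_common__pb2._TOKENRESPONSE in the fix-up section below, so a
# successful response can carry a full TokenResponse -- presumably only when
# AuthenticateConfig.doIncludeToken was set, judging by the field name.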
_VALIDATEEVENTRESPONSE = _descriptor.Descriptor(
name='ValidateEventResponse',
full_name='sensory.api.v1.audio.ValidateEventResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='audioEnergy', full_name='sensory.api.v1.audio.ValidateEventResponse.audioEnergy', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='success', full_name='sensory.api.v1.audio.ValidateEventResponse.success', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='resultId', full_name='sensory.api.v1.audio.ValidateEventResponse.resultId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='score', full_name='sensory.api.v1.audio.ValidateEventResponse.score', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='postProcessingAction', full_name='sensory.api.v1.audio.ValidateEventResponse.postProcessingAction', index=4,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2094,
serialized_end=2275,
)
_VALIDATEENROLLEDEVENTRESPONSE = _descriptor.Descriptor(
name='ValidateEnrolledEventResponse',
full_name='sensory.api.v1.audio.ValidateEnrolledEventResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='audioEnergy', full_name='sensory.api.v1.audio.ValidateEnrolledEventResponse.audioEnergy', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='success', full_name='sensory.api.v1.audio.ValidateEnrolledEventResponse.success', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enrollmentId', full_name='sensory.api.v1.audio.ValidateEnrolledEventResponse.enrollmentId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='userId', full_name='sensory.api.v1.audio.ValidateEnrolledEventResponse.userId', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='modelPrompt', full_name='sensory.api.v1.audio.ValidateEnrolledEventResponse.modelPrompt', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2278,
serialized_end=2406,
)
_TRANSCRIBERESPONSE = _descriptor.Descriptor(
name='TranscribeResponse',
full_name='sensory.api.v1.audio.TranscribeResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='audioEnergy', full_name='sensory.api.v1.audio.TranscribeResponse.audioEnergy', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='transcript', full_name='sensory.api.v1.audio.TranscribeResponse.transcript', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='isPartialResult', full_name='sensory.api.v1.audio.TranscribeResponse.isPartialResult', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='postProcessingAction', full_name='sensory.api.v1.audio.TranscribeResponse.postProcessingAction', index=3,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2409,
serialized_end=2582,
)
_CREATEENROLLMENTCONFIG = _descriptor.Descriptor(
name='CreateEnrollmentConfig',
full_name='sensory.api.v1.audio.CreateEnrollmentConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='audio', full_name='sensory.api.v1.audio.CreateEnrollmentConfig.audio', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='userId', full_name='sensory.api.v1.audio.CreateEnrollmentConfig.userId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\006r\004\020\001\030\177', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='deviceId', full_name='sensory.api.v1.audio.CreateEnrollmentConfig.deviceId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\006r\004\020\001\030\177', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='modelName', full_name='sensory.api.v1.audio.CreateEnrollmentConfig.modelName', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\007r\005\020\001\030\377\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='sensory.api.v1.audio.CreateEnrollmentConfig.description', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005r\003\030\377\007', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='isLivenessEnabled', full_name='sensory.api.v1.audio.CreateEnrollmentConfig.isLivenessEnabled', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enrollmentNumUtterances', full_name='sensory.api.v1.audio.CreateEnrollmentConfig.enrollmentNumUtterances', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\006*\004\030\n(\000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enrollmentDuration', full_name='sensory.api.v1.audio.CreateEnrollmentConfig.enrollmentDuration', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\014\n\n\035\000\000pA-\000\000\000\000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='referenceId', full_name='sensory.api.v1.audio.CreateEnrollmentConfig.referenceId', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\004r\002\030\177', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='enrollLength', full_name='sensory.api.v1.audio.CreateEnrollmentConfig.enrollLength',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=2585,
serialized_end=2953,
)
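# NOTE: enrollLength is a oneof over enrollmentNumUtterances and
# enrollmentDuration, so callers size an enrollment by utterance count or by
# seconds, not both. Under the same validate-rules reading as above, the
# option bytes bound enrollmentNumUtterances to <= 10 and enrollmentDuration
# to the float range [0.0, 15.0] (00 00 70 41 is 15.0f little-endian);
# best-effort decodings only. Sketch, using only field names from this
# descriptor:
#
#   # cfg = CreateEnrollmentConfig(audio=audio_cfg, userId='u1', deviceId='d1',
#   #                              modelName='m', enrollmentNumUtterances=4)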
_AUTHENTICATECONFIG = _descriptor.Descriptor(
name='AuthenticateConfig',
full_name='sensory.api.v1.audio.AuthenticateConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='audio', full_name='sensory.api.v1.audio.AuthenticateConfig.audio', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enrollmentId', full_name='sensory.api.v1.audio.AuthenticateConfig.enrollmentId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005r\003\260\001\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enrollmentGroupId', full_name='sensory.api.v1.audio.AuthenticateConfig.enrollmentGroupId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='doIncludeToken', full_name='sensory.api.v1.audio.AuthenticateConfig.doIncludeToken', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sensitivity', full_name='sensory.api.v1.audio.AuthenticateConfig.sensitivity', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\202\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='security', full_name='sensory.api.v1.audio.AuthenticateConfig.security', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\202\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='isLivenessEnabled', full_name='sensory.api.v1.audio.AuthenticateConfig.isLivenessEnabled', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_AUTHENTICATECONFIG_THRESHOLDSECURITY,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='authId', full_name='sensory.api.v1.audio.AuthenticateConfig.authId',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[], serialized_options=b'\370B\001'),
],
serialized_start=2956,
serialized_end=3368,
)
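# NOTE: authId is a oneof over enrollmentId and enrollmentGroupId; its
# serialized_options b'\370B\001' reads as the protoc-gen-validate
# (validate.required) marker, i.e. exactly one of the two IDs must be set.
# The b'\372B\005r\003\260\001\001' option on enrollmentId looks like a string
# uuid: true rule -- again a best-effort reading of the bytes.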
_VALIDATEEVENTCONFIG = _descriptor.Descriptor(
name='ValidateEventConfig',
full_name='sensory.api.v1.audio.ValidateEventConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='audio', full_name='sensory.api.v1.audio.ValidateEventConfig.audio', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='modelName', full_name='sensory.api.v1.audio.ValidateEventConfig.modelName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\007r\005\020\001\030\377\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='userId', full_name='sensory.api.v1.audio.ValidateEventConfig.userId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\006r\004\020\001\030\177', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sensitivity', full_name='sensory.api.v1.audio.ValidateEventConfig.sensitivity', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\202\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3371,
serialized_end=3585,
)
_CREATEENROLLMENTEVENTCONFIG = _descriptor.Descriptor(
name='CreateEnrollmentEventConfig',
full_name='sensory.api.v1.audio.CreateEnrollmentEventConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='audio', full_name='sensory.api.v1.audio.CreateEnrollmentEventConfig.audio', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='userId', full_name='sensory.api.v1.audio.CreateEnrollmentEventConfig.userId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\006r\004\020\001\030\177', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='modelName', full_name='sensory.api.v1.audio.CreateEnrollmentEventConfig.modelName', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\007r\005\020\001\030\377\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='sensory.api.v1.audio.CreateEnrollmentEventConfig.description', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005r\003\030\377\007', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enrollmentNumUtterances', full_name='sensory.api.v1.audio.CreateEnrollmentEventConfig.enrollmentNumUtterances', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\006*\004\030\n(\000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enrollmentDuration', full_name='sensory.api.v1.audio.CreateEnrollmentEventConfig.enrollmentDuration', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\014\n\n\035\000\000pA-\000\000\000\000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='referenceId', full_name='sensory.api.v1.audio.CreateEnrollmentEventConfig.referenceId', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\004r\002\030\177', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='enrollLength', full_name='sensory.api.v1.audio.CreateEnrollmentEventConfig.enrollLength',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=3588,
serialized_end=3905,
)
_VALIDATEENROLLEDEVENTCONFIG = _descriptor.Descriptor(
name='ValidateEnrolledEventConfig',
full_name='sensory.api.v1.audio.ValidateEnrolledEventConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='audio', full_name='sensory.api.v1.audio.ValidateEnrolledEventConfig.audio', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enrollmentId', full_name='sensory.api.v1.audio.ValidateEnrolledEventConfig.enrollmentId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005r\003\260\001\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enrollmentGroupId', full_name='sensory.api.v1.audio.ValidateEnrolledEventConfig.enrollmentGroupId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sensitivity', full_name='sensory.api.v1.audio.ValidateEnrolledEventConfig.sensitivity', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\202\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='authId', full_name='sensory.api.v1.audio.ValidateEnrolledEventConfig.authId',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[], serialized_options=b'\370B\001'),
],
serialized_start=3908,
serialized_end=4150,
)
_TRANSCRIBECONFIG = _descriptor.Descriptor(
name='TranscribeConfig',
full_name='sensory.api.v1.audio.TranscribeConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='audio', full_name='sensory.api.v1.audio.TranscribeConfig.audio', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='modelName', full_name='sensory.api.v1.audio.TranscribeConfig.modelName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\007r\005\020\001\030\377\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='userId', full_name='sensory.api.v1.audio.TranscribeConfig.userId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\006r\004\020\001\030\177', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4153,
serialized_end=4289,
)
_AUDIOCONFIG = _descriptor.Descriptor(
name='AudioConfig',
full_name='sensory.api.v1.audio.AudioConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='encoding', full_name='sensory.api.v1.audio.AudioConfig.encoding', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\202\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sampleRateHertz', full_name='sensory.api.v1.audio.AudioConfig.sampleRateHertz', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\032\003 \300>', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='audioChannelCount', full_name='sensory.api.v1.audio.AudioConfig.audioChannelCount', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\004\032\002 \000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='languageCode', full_name='sensory.api.v1.audio.AudioConfig.languageCode', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_AUDIOCONFIG_AUDIOENCODING,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4292,
serialized_end=4527,
)
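# NOTE: AudioConfig nests the AudioEncoding enum (LINEAR16/FLAC/MULAW),
# attached in the fix-up below. Under the same validate-rules reading, the
# option bytes on sampleRateHertz decode to int32 gte: 8000, i.e. sample rates
# under 8 kHz would fail validation; treat this as an inference from the bytes.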
_AUDIOMODEL.fields_by_name['modelType'].enum_type = common_dot_common__pb2._MODELTYPE
_AUDIOMODEL.fields_by_name['technology'].enum_type = common_dot_common__pb2._TECHNOLOGYTYPE
_AUDIOREQUESTPOSTPROCESSINGACTION.fields_by_name['action'].enum_type = _AUDIOPOSTPROCESSINGACTION
_AUDIORESPONSEPOSTPROCESSINGACTION.fields_by_name['action'].enum_type = _AUDIOPOSTPROCESSINGACTION
_GETMODELSRESPONSE.fields_by_name['models'].message_type = _AUDIOMODEL
_CREATEENROLLMENTREQUEST.fields_by_name['config'].message_type = _CREATEENROLLMENTCONFIG
_CREATEENROLLMENTREQUEST.oneofs_by_name['streamingRequest'].fields.append(
_CREATEENROLLMENTREQUEST.fields_by_name['config'])
_CREATEENROLLMENTREQUEST.fields_by_name['config'].containing_oneof = _CREATEENROLLMENTREQUEST.oneofs_by_name['streamingRequest']
_CREATEENROLLMENTREQUEST.oneofs_by_name['streamingRequest'].fields.append(
_CREATEENROLLMENTREQUEST.fields_by_name['audioContent'])
_CREATEENROLLMENTREQUEST.fields_by_name['audioContent'].containing_oneof = _CREATEENROLLMENTREQUEST.oneofs_by_name['streamingRequest']
_AUTHENTICATEREQUEST.fields_by_name['config'].message_type = _AUTHENTICATECONFIG
_AUTHENTICATEREQUEST.oneofs_by_name['streamingRequest'].fields.append(
_AUTHENTICATEREQUEST.fields_by_name['config'])
_AUTHENTICATEREQUEST.fields_by_name['config'].containing_oneof = _AUTHENTICATEREQUEST.oneofs_by_name['streamingRequest']
_AUTHENTICATEREQUEST.oneofs_by_name['streamingRequest'].fields.append(
_AUTHENTICATEREQUEST.fields_by_name['audioContent'])
_AUTHENTICATEREQUEST.fields_by_name['audioContent'].containing_oneof = _AUTHENTICATEREQUEST.oneofs_by_name['streamingRequest']
_VALIDATEEVENTREQUEST.fields_by_name['config'].message_type = _VALIDATEEVENTCONFIG
_VALIDATEEVENTREQUEST.fields_by_name['postProcessingAction'].message_type = _AUDIOREQUESTPOSTPROCESSINGACTION
_VALIDATEEVENTREQUEST.oneofs_by_name['streamingRequest'].fields.append(
_VALIDATEEVENTREQUEST.fields_by_name['config'])
_VALIDATEEVENTREQUEST.fields_by_name['config'].containing_oneof = _VALIDATEEVENTREQUEST.oneofs_by_name['streamingRequest']
_VALIDATEEVENTREQUEST.oneofs_by_name['streamingRequest'].fields.append(
_VALIDATEEVENTREQUEST.fields_by_name['audioContent'])
_VALIDATEEVENTREQUEST.fields_by_name['audioContent'].containing_oneof = _VALIDATEEVENTREQUEST.oneofs_by_name['streamingRequest']
_CREATEENROLLEDEVENTREQUEST.fields_by_name['config'].message_type = _CREATEENROLLMENTEVENTCONFIG
_CREATEENROLLEDEVENTREQUEST.oneofs_by_name['streamingRequest'].fields.append(
_CREATEENROLLEDEVENTREQUEST.fields_by_name['config'])
_CREATEENROLLEDEVENTREQUEST.fields_by_name['config'].containing_oneof = _CREATEENROLLEDEVENTREQUEST.oneofs_by_name['streamingRequest']
_CREATEENROLLEDEVENTREQUEST.oneofs_by_name['streamingRequest'].fields.append(
_CREATEENROLLEDEVENTREQUEST.fields_by_name['audioContent'])
_CREATEENROLLEDEVENTREQUEST.fields_by_name['audioContent'].containing_oneof = _CREATEENROLLEDEVENTREQUEST.oneofs_by_name['streamingRequest']
_VALIDATEENROLLEDEVENTREQUEST.fields_by_name['config'].message_type = _VALIDATEENROLLEDEVENTCONFIG
_VALIDATEENROLLEDEVENTREQUEST.oneofs_by_name['streamingRequest'].fields.append(
_VALIDATEENROLLEDEVENTREQUEST.fields_by_name['config'])
_VALIDATEENROLLEDEVENTREQUEST.fields_by_name['config'].containing_oneof = _VALIDATEENROLLEDEVENTREQUEST.oneofs_by_name['streamingRequest']
_VALIDATEENROLLEDEVENTREQUEST.oneofs_by_name['streamingRequest'].fields.append(
_VALIDATEENROLLEDEVENTREQUEST.fields_by_name['audioContent'])
_VALIDATEENROLLEDEVENTREQUEST.fields_by_name['audioContent'].containing_oneof = _VALIDATEENROLLEDEVENTREQUEST.oneofs_by_name['streamingRequest']
_TRANSCRIBEREQUEST.fields_by_name['config'].message_type = _TRANSCRIBECONFIG
_TRANSCRIBEREQUEST.fields_by_name['postProcessingAction'].message_type = _AUDIOREQUESTPOSTPROCESSINGACTION
_TRANSCRIBEREQUEST.oneofs_by_name['streamingRequest'].fields.append(
_TRANSCRIBEREQUEST.fields_by_name['config'])
_TRANSCRIBEREQUEST.fields_by_name['config'].containing_oneof = _TRANSCRIBEREQUEST.oneofs_by_name['streamingRequest']
_TRANSCRIBEREQUEST.oneofs_by_name['streamingRequest'].fields.append(
_TRANSCRIBEREQUEST.fields_by_name['audioContent'])
_TRANSCRIBEREQUEST.fields_by_name['audioContent'].containing_oneof = _TRANSCRIBEREQUEST.oneofs_by_name['streamingRequest']
_AUTHENTICATERESPONSE.fields_by_name['token'].message_type = common_dot_common__pb2._TOKENRESPONSE
_VALIDATEEVENTRESPONSE.fields_by_name['postProcessingAction'].message_type = _AUDIORESPONSEPOSTPROCESSINGACTION
_TRANSCRIBERESPONSE.fields_by_name['postProcessingAction'].message_type = _AUDIORESPONSEPOSTPROCESSINGACTION
_CREATEENROLLMENTCONFIG.fields_by_name['audio'].message_type = _AUDIOCONFIG
_CREATEENROLLMENTCONFIG.oneofs_by_name['enrollLength'].fields.append(
_CREATEENROLLMENTCONFIG.fields_by_name['enrollmentNumUtterances'])
_CREATEENROLLMENTCONFIG.fields_by_name['enrollmentNumUtterances'].containing_oneof = _CREATEENROLLMENTCONFIG.oneofs_by_name['enrollLength']
_CREATEENROLLMENTCONFIG.oneofs_by_name['enrollLength'].fields.append(
_CREATEENROLLMENTCONFIG.fields_by_name['enrollmentDuration'])
_CREATEENROLLMENTCONFIG.fields_by_name['enrollmentDuration'].containing_oneof = _CREATEENROLLMENTCONFIG.oneofs_by_name['enrollLength']
_AUTHENTICATECONFIG.fields_by_name['audio'].message_type = _AUDIOCONFIG
_AUTHENTICATECONFIG.fields_by_name['sensitivity'].enum_type = _THRESHOLDSENSITIVITY
_AUTHENTICATECONFIG.fields_by_name['security'].enum_type = _AUTHENTICATECONFIG_THRESHOLDSECURITY
_AUTHENTICATECONFIG_THRESHOLDSECURITY.containing_type = _AUTHENTICATECONFIG
_AUTHENTICATECONFIG.oneofs_by_name['authId'].fields.append(
_AUTHENTICATECONFIG.fields_by_name['enrollmentId'])
_AUTHENTICATECONFIG.fields_by_name['enrollmentId'].containing_oneof = _AUTHENTICATECONFIG.oneofs_by_name['authId']
_AUTHENTICATECONFIG.oneofs_by_name['authId'].fields.append(
_AUTHENTICATECONFIG.fields_by_name['enrollmentGroupId'])
_AUTHENTICATECONFIG.fields_by_name['enrollmentGroupId'].containing_oneof = _AUTHENTICATECONFIG.oneofs_by_name['authId']
_VALIDATEEVENTCONFIG.fields_by_name['audio'].message_type = _AUDIOCONFIG
_VALIDATEEVENTCONFIG.fields_by_name['sensitivity'].enum_type = _THRESHOLDSENSITIVITY
_CREATEENROLLMENTEVENTCONFIG.fields_by_name['audio'].message_type = _AUDIOCONFIG
_CREATEENROLLMENTEVENTCONFIG.oneofs_by_name['enrollLength'].fields.append(
_CREATEENROLLMENTEVENTCONFIG.fields_by_name['enrollmentNumUtterances'])
_CREATEENROLLMENTEVENTCONFIG.fields_by_name['enrollmentNumUtterances'].containing_oneof = _CREATEENROLLMENTEVENTCONFIG.oneofs_by_name['enrollLength']
_CREATEENROLLMENTEVENTCONFIG.oneofs_by_name['enrollLength'].fields.append(
_CREATEENROLLMENTEVENTCONFIG.fields_by_name['enrollmentDuration'])
_CREATEENROLLMENTEVENTCONFIG.fields_by_name['enrollmentDuration'].containing_oneof = _CREATEENROLLMENTEVENTCONFIG.oneofs_by_name['enrollLength']
_VALIDATEENROLLEDEVENTCONFIG.fields_by_name['audio'].message_type = _AUDIOCONFIG
_VALIDATEENROLLEDEVENTCONFIG.fields_by_name['sensitivity'].enum_type = _THRESHOLDSENSITIVITY
_VALIDATEENROLLEDEVENTCONFIG.oneofs_by_name['authId'].fields.append(
_VALIDATEENROLLEDEVENTCONFIG.fields_by_name['enrollmentId'])
_VALIDATEENROLLEDEVENTCONFIG.fields_by_name['enrollmentId'].containing_oneof = _VALIDATEENROLLEDEVENTCONFIG.oneofs_by_name['authId']
_VALIDATEENROLLEDEVENTCONFIG.oneofs_by_name['authId'].fields.append(
_VALIDATEENROLLEDEVENTCONFIG.fields_by_name['enrollmentGroupId'])
_VALIDATEENROLLEDEVENTCONFIG.fields_by_name['enrollmentGroupId'].containing_oneof = _VALIDATEENROLLEDEVENTCONFIG.oneofs_by_name['authId']
_TRANSCRIBECONFIG.fields_by_name['audio'].message_type = _AUDIOCONFIG
_AUDIOCONFIG.fields_by_name['encoding'].enum_type = _AUDIOCONFIG_AUDIOENCODING
_AUDIOCONFIG_AUDIOENCODING.containing_type = _AUDIOCONFIG
DESCRIPTOR.message_types_by_name['GetModelsRequest'] = _GETMODELSREQUEST
DESCRIPTOR.message_types_by_name['AudioModel'] = _AUDIOMODEL
DESCRIPTOR.message_types_by_name['AudioRequestPostProcessingAction'] = _AUDIOREQUESTPOSTPROCESSINGACTION
DESCRIPTOR.message_types_by_name['AudioResponsePostProcessingAction'] = _AUDIORESPONSEPOSTPROCESSINGACTION
DESCRIPTOR.message_types_by_name['GetModelsResponse'] = _GETMODELSRESPONSE
DESCRIPTOR.message_types_by_name['CreateEnrollmentRequest'] = _CREATEENROLLMENTREQUEST
DESCRIPTOR.message_types_by_name['AuthenticateRequest'] = _AUTHENTICATEREQUEST
DESCRIPTOR.message_types_by_name['ValidateEventRequest'] = _VALIDATEEVENTREQUEST
DESCRIPTOR.message_types_by_name['CreateEnrolledEventRequest'] = _CREATEENROLLEDEVENTREQUEST
DESCRIPTOR.message_types_by_name['ValidateEnrolledEventRequest'] = _VALIDATEENROLLEDEVENTREQUEST
DESCRIPTOR.message_types_by_name['TranscribeRequest'] = _TRANSCRIBEREQUEST
DESCRIPTOR.message_types_by_name['CreateEnrollmentResponse'] = _CREATEENROLLMENTRESPONSE
DESCRIPTOR.message_types_by_name['AuthenticateResponse'] = _AUTHENTICATERESPONSE
DESCRIPTOR.message_types_by_name['ValidateEventResponse'] = _VALIDATEEVENTRESPONSE
DESCRIPTOR.message_types_by_name['ValidateEnrolledEventResponse'] = _VALIDATEENROLLEDEVENTRESPONSE
DESCRIPTOR.message_types_by_name['TranscribeResponse'] = _TRANSCRIBERESPONSE
DESCRIPTOR.message_types_by_name['CreateEnrollmentConfig'] = _CREATEENROLLMENTCONFIG
DESCRIPTOR.message_types_by_name['AuthenticateConfig'] = _AUTHENTICATECONFIG
DESCRIPTOR.message_types_by_name['ValidateEventConfig'] = _VALIDATEEVENTCONFIG
DESCRIPTOR.message_types_by_name['CreateEnrollmentEventConfig'] = _CREATEENROLLMENTEVENTCONFIG
DESCRIPTOR.message_types_by_name['ValidateEnrolledEventConfig'] = _VALIDATEENROLLEDEVENTCONFIG
DESCRIPTOR.message_types_by_name['TranscribeConfig'] = _TRANSCRIBECONFIG
DESCRIPTOR.message_types_by_name['AudioConfig'] = _AUDIOCONFIG
DESCRIPTOR.enum_types_by_name['AudioPostProcessingAction'] = _AUDIOPOSTPROCESSINGACTION
DESCRIPTOR.enum_types_by_name['ThresholdSensitivity'] = _THRESHOLDSENSITIVITY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
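# The blocks below use the protobuf reflection API to build a concrete Python
# class for each message descriptor declared above; registering each class
# with the symbol database makes it resolvable at runtime by its full
# protobuf name (e.g. 'sensory.api.v1.audio.GetModelsRequest').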
GetModelsRequest = _reflection.GeneratedProtocolMessageType('GetModelsRequest', (_message.Message,), {
'DESCRIPTOR' : _GETMODELSREQUEST,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.GetModelsRequest)
})
_sym_db.RegisterMessage(GetModelsRequest)
AudioModel = _reflection.GeneratedProtocolMessageType('AudioModel', (_message.Message,), {
'DESCRIPTOR' : _AUDIOMODEL,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.AudioModel)
})
_sym_db.RegisterMessage(AudioModel)
AudioRequestPostProcessingAction = _reflection.GeneratedProtocolMessageType('AudioRequestPostProcessingAction', (_message.Message,), {
'DESCRIPTOR' : _AUDIOREQUESTPOSTPROCESSINGACTION,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.AudioRequestPostProcessingAction)
})
_sym_db.RegisterMessage(AudioRequestPostProcessingAction)
AudioResponsePostProcessingAction = _reflection.GeneratedProtocolMessageType('AudioResponsePostProcessingAction', (_message.Message,), {
'DESCRIPTOR' : _AUDIORESPONSEPOSTPROCESSINGACTION,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.AudioResponsePostProcessingAction)
})
_sym_db.RegisterMessage(AudioResponsePostProcessingAction)
GetModelsResponse = _reflection.GeneratedProtocolMessageType('GetModelsResponse', (_message.Message,), {
'DESCRIPTOR' : _GETMODELSRESPONSE,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.GetModelsResponse)
})
_sym_db.RegisterMessage(GetModelsResponse)
CreateEnrollmentRequest = _reflection.GeneratedProtocolMessageType('CreateEnrollmentRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATEENROLLMENTREQUEST,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.CreateEnrollmentRequest)
})
_sym_db.RegisterMessage(CreateEnrollmentRequest)
AuthenticateRequest = _reflection.GeneratedProtocolMessageType('AuthenticateRequest', (_message.Message,), {
'DESCRIPTOR' : _AUTHENTICATEREQUEST,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.AuthenticateRequest)
})
_sym_db.RegisterMessage(AuthenticateRequest)
ValidateEventRequest = _reflection.GeneratedProtocolMessageType('ValidateEventRequest', (_message.Message,), {
'DESCRIPTOR' : _VALIDATEEVENTREQUEST,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.ValidateEventRequest)
})
_sym_db.RegisterMessage(ValidateEventRequest)
CreateEnrolledEventRequest = _reflection.GeneratedProtocolMessageType('CreateEnrolledEventRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATEENROLLEDEVENTREQUEST,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.CreateEnrolledEventRequest)
})
_sym_db.RegisterMessage(CreateEnrolledEventRequest)
ValidateEnrolledEventRequest = _reflection.GeneratedProtocolMessageType('ValidateEnrolledEventRequest', (_message.Message,), {
'DESCRIPTOR' : _VALIDATEENROLLEDEVENTREQUEST,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.ValidateEnrolledEventRequest)
})
_sym_db.RegisterMessage(ValidateEnrolledEventRequest)
TranscribeRequest = _reflection.GeneratedProtocolMessageType('TranscribeRequest', (_message.Message,), {
'DESCRIPTOR' : _TRANSCRIBEREQUEST,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.TranscribeRequest)
})
_sym_db.RegisterMessage(TranscribeRequest)
CreateEnrollmentResponse = _reflection.GeneratedProtocolMessageType('CreateEnrollmentResponse', (_message.Message,), {
'DESCRIPTOR' : _CREATEENROLLMENTRESPONSE,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.CreateEnrollmentResponse)
})
_sym_db.RegisterMessage(CreateEnrollmentResponse)
AuthenticateResponse = _reflection.GeneratedProtocolMessageType('AuthenticateResponse', (_message.Message,), {
'DESCRIPTOR' : _AUTHENTICATERESPONSE,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.AuthenticateResponse)
})
_sym_db.RegisterMessage(AuthenticateResponse)
ValidateEventResponse = _reflection.GeneratedProtocolMessageType('ValidateEventResponse', (_message.Message,), {
'DESCRIPTOR' : _VALIDATEEVENTRESPONSE,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.ValidateEventResponse)
})
_sym_db.RegisterMessage(ValidateEventResponse)
ValidateEnrolledEventResponse = _reflection.GeneratedProtocolMessageType('ValidateEnrolledEventResponse', (_message.Message,), {
'DESCRIPTOR' : _VALIDATEENROLLEDEVENTRESPONSE,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.ValidateEnrolledEventResponse)
})
_sym_db.RegisterMessage(ValidateEnrolledEventResponse)
TranscribeResponse = _reflection.GeneratedProtocolMessageType('TranscribeResponse', (_message.Message,), {
'DESCRIPTOR' : _TRANSCRIBERESPONSE,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.TranscribeResponse)
})
_sym_db.RegisterMessage(TranscribeResponse)
CreateEnrollmentConfig = _reflection.GeneratedProtocolMessageType('CreateEnrollmentConfig', (_message.Message,), {
'DESCRIPTOR' : _CREATEENROLLMENTCONFIG,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.CreateEnrollmentConfig)
})
_sym_db.RegisterMessage(CreateEnrollmentConfig)
AuthenticateConfig = _reflection.GeneratedProtocolMessageType('AuthenticateConfig', (_message.Message,), {
'DESCRIPTOR' : _AUTHENTICATECONFIG,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.AuthenticateConfig)
})
_sym_db.RegisterMessage(AuthenticateConfig)
ValidateEventConfig = _reflection.GeneratedProtocolMessageType('ValidateEventConfig', (_message.Message,), {
'DESCRIPTOR' : _VALIDATEEVENTCONFIG,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.ValidateEventConfig)
})
_sym_db.RegisterMessage(ValidateEventConfig)
CreateEnrollmentEventConfig = _reflection.GeneratedProtocolMessageType('CreateEnrollmentEventConfig', (_message.Message,), {
'DESCRIPTOR' : _CREATEENROLLMENTEVENTCONFIG,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.CreateEnrollmentEventConfig)
})
_sym_db.RegisterMessage(CreateEnrollmentEventConfig)
ValidateEnrolledEventConfig = _reflection.GeneratedProtocolMessageType('ValidateEnrolledEventConfig', (_message.Message,), {
'DESCRIPTOR' : _VALIDATEENROLLEDEVENTCONFIG,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.ValidateEnrolledEventConfig)
})
_sym_db.RegisterMessage(ValidateEnrolledEventConfig)
TranscribeConfig = _reflection.GeneratedProtocolMessageType('TranscribeConfig', (_message.Message,), {
'DESCRIPTOR' : _TRANSCRIBECONFIG,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.TranscribeConfig)
})
_sym_db.RegisterMessage(TranscribeConfig)
AudioConfig = _reflection.GeneratedProtocolMessageType('AudioConfig', (_message.Message,), {
'DESCRIPTOR' : _AUDIOCONFIG,
'__module__' : 'v1.audio.audio_pb2'
# @@protoc_insertion_point(class_scope:sensory.api.v1.audio.AudioConfig)
})
_sym_db.RegisterMessage(AudioConfig)
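# The assignments below reset the cached option objects on the descriptors;
# this appears to be the standard pattern emitted by protoc so that options
# are re-parsed lazily from their serialized form when first accessed.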
DESCRIPTOR._options = None
_AUDIOREQUESTPOSTPROCESSINGACTION.fields_by_name['action']._options = None
_AUDIORESPONSEPOSTPROCESSINGACTION.fields_by_name['action']._options = None
_CREATEENROLLMENTREQUEST.oneofs_by_name['streamingRequest']._options = None
_AUTHENTICATEREQUEST.oneofs_by_name['streamingRequest']._options = None
_VALIDATEEVENTREQUEST.oneofs_by_name['streamingRequest']._options = None
_CREATEENROLLEDEVENTREQUEST.oneofs_by_name['streamingRequest']._options = None
_VALIDATEENROLLEDEVENTREQUEST.oneofs_by_name['streamingRequest']._options = None
_TRANSCRIBEREQUEST.oneofs_by_name['streamingRequest']._options = None
_CREATEENROLLMENTCONFIG.fields_by_name['audio']._options = None
_CREATEENROLLMENTCONFIG.fields_by_name['userId']._options = None
_CREATEENROLLMENTCONFIG.fields_by_name['deviceId']._options = None
_CREATEENROLLMENTCONFIG.fields_by_name['modelName']._options = None
_CREATEENROLLMENTCONFIG.fields_by_name['description']._options = None
_CREATEENROLLMENTCONFIG.fields_by_name['enrollmentNumUtterances']._options = None
_CREATEENROLLMENTCONFIG.fields_by_name['enrollmentDuration']._options = None
_CREATEENROLLMENTCONFIG.fields_by_name['referenceId']._options = None
_AUTHENTICATECONFIG.oneofs_by_name['authId']._options = None
_AUTHENTICATECONFIG.fields_by_name['audio']._options = None
_AUTHENTICATECONFIG.fields_by_name['enrollmentId']._options = None
_AUTHENTICATECONFIG.fields_by_name['sensitivity']._options = None
_AUTHENTICATECONFIG.fields_by_name['security']._options = None
_VALIDATEEVENTCONFIG.fields_by_name['audio']._options = None
_VALIDATEEVENTCONFIG.fields_by_name['modelName']._options = None
_VALIDATEEVENTCONFIG.fields_by_name['userId']._options = None
_VALIDATEEVENTCONFIG.fields_by_name['sensitivity']._options = None
_CREATEENROLLMENTEVENTCONFIG.fields_by_name['audio']._options = None
_CREATEENROLLMENTEVENTCONFIG.fields_by_name['userId']._options = None
_CREATEENROLLMENTEVENTCONFIG.fields_by_name['modelName']._options = None
_CREATEENROLLMENTEVENTCONFIG.fields_by_name['description']._options = None
_CREATEENROLLMENTEVENTCONFIG.fields_by_name['enrollmentNumUtterances']._options = None
_CREATEENROLLMENTEVENTCONFIG.fields_by_name['enrollmentDuration']._options = None
_CREATEENROLLMENTEVENTCONFIG.fields_by_name['referenceId']._options = None
_VALIDATEENROLLEDEVENTCONFIG.oneofs_by_name['authId']._options = None
_VALIDATEENROLLEDEVENTCONFIG.fields_by_name['audio']._options = None
_VALIDATEENROLLEDEVENTCONFIG.fields_by_name['enrollmentId']._options = None
_VALIDATEENROLLEDEVENTCONFIG.fields_by_name['sensitivity']._options = None
_TRANSCRIBECONFIG.fields_by_name['audio']._options = None
_TRANSCRIBECONFIG.fields_by_name['modelName']._options = None
_TRANSCRIBECONFIG.fields_by_name['userId']._options = None
_AUDIOCONFIG.fields_by_name['encoding']._options = None
_AUDIOCONFIG.fields_by_name['sampleRateHertz']._options = None
_AUDIOCONFIG.fields_by_name['audioChannelCount']._options = None
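# Service descriptors for the gRPC services defined in the .proto file
# follow. They only describe the RPC surface; client stubs and server
# servicer classes are generated separately in the companion *_pb2_grpc
# module.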
_AUDIOMODELS = _descriptor.ServiceDescriptor(
name='AudioModels',
full_name='sensory.api.v1.audio.AudioModels',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=4673,
serialized_end=4782,
methods=[
_descriptor.MethodDescriptor(
name='GetModels',
full_name='sensory.api.v1.audio.AudioModels.GetModels',
index=0,
containing_service=None,
input_type=_GETMODELSREQUEST,
output_type=_GETMODELSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_AUDIOMODELS)
DESCRIPTOR.services_by_name['AudioModels'] = _AUDIOMODELS
_AUDIOBIOMETRICS = _descriptor.ServiceDescriptor(
name='AudioBiometrics',
full_name='sensory.api.v1.audio.AudioBiometrics',
file=DESCRIPTOR,
index=1,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=4785,
serialized_end=5032,
methods=[
_descriptor.MethodDescriptor(
name='CreateEnrollment',
full_name='sensory.api.v1.audio.AudioBiometrics.CreateEnrollment',
index=0,
containing_service=None,
input_type=_CREATEENROLLMENTREQUEST,
output_type=_CREATEENROLLMENTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Authenticate',
full_name='sensory.api.v1.audio.AudioBiometrics.Authenticate',
index=1,
containing_service=None,
input_type=_AUTHENTICATEREQUEST,
output_type=_AUTHENTICATERESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_AUDIOBIOMETRICS)
DESCRIPTOR.services_by_name['AudioBiometrics'] = _AUDIOBIOMETRICS
_AUDIOEVENTS = _descriptor.ServiceDescriptor(
name='AudioEvents',
full_name='sensory.api.v1.audio.AudioEvents',
file=DESCRIPTOR,
index=2,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=5035,
serialized_end=5424,
methods=[
_descriptor.MethodDescriptor(
name='ValidateEvent',
full_name='sensory.api.v1.audio.AudioEvents.ValidateEvent',
index=0,
containing_service=None,
input_type=_VALIDATEEVENTREQUEST,
output_type=_VALIDATEEVENTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CreateEnrolledEvent',
full_name='sensory.api.v1.audio.AudioEvents.CreateEnrolledEvent',
index=1,
containing_service=None,
input_type=_CREATEENROLLEDEVENTREQUEST,
output_type=_CREATEENROLLMENTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ValidateEnrolledEvent',
full_name='sensory.api.v1.audio.AudioEvents.ValidateEnrolledEvent',
index=2,
containing_service=None,
input_type=_VALIDATEENROLLEDEVENTREQUEST,
output_type=_VALIDATEENROLLEDEVENTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_AUDIOEVENTS)
DESCRIPTOR.services_by_name['AudioEvents'] = _AUDIOEVENTS
_AUDIOTRANSCRIPTIONS = _descriptor.ServiceDescriptor(
name='AudioTranscriptions',
full_name='sensory.api.v1.audio.AudioTranscriptions',
file=DESCRIPTOR,
index=3,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=5426,
serialized_end=5550,
methods=[
_descriptor.MethodDescriptor(
name='Transcribe',
full_name='sensory.api.v1.audio.AudioTranscriptions.Transcribe',
index=0,
containing_service=None,
input_type=_TRANSCRIBEREQUEST,
output_type=_TRANSCRIBERESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_AUDIOTRANSCRIPTIONS)
DESCRIPTOR.services_by_name['AudioTranscriptions'] = _AUDIOTRANSCRIPTIONS
# @@protoc_insertion_point(module_scope)
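# Illustrative usage sketch (not part of the generated output). The field
# names below come from the descriptors above; the 'LINEAR16' encoding value
# is a hypothetical placeholder and may differ in the actual .proto file:
#
#   config = AudioConfig(
#       encoding=AudioConfig.AudioEncoding.Value('LINEAR16'),  # hypothetical
#       sampleRateHertz=16000,
#       audioChannelCount=1,
#   )
#   request = TranscribeRequest(config=TranscribeConfig(audio=config))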
| 50.301579 | 9,760 | 0.776935 | 11,480 | 95,573 | 6.1723 | 0.044861 | 0.037145 | 0.063818 | 0.048703 | 0.803791 | 0.749245 | 0.708614 | 0.613876 | 0.613255 | 0.612478 | 0 | 0.048856 | 0.101797 | 95,573 | 1,899 | 9,761 | 50.328067 | 0.776571 | 0.021826 | 0 | 0.665352 | 1 | 0.004507 | 0.253644 | 0.199852 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.003944 | 0 | 0.003944 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e76934ddff50c1735175d1022af854ad16918095 | 1,054 | py | Python | webapp/apps/carousel/models.py | eb-intl/eb-intl.com | 36f0026c4af61aa68fd294871fdd693680f690ce | ["MIT"] | null | null | null | webapp/apps/carousel/models.py | eb-intl/eb-intl.com | 36f0026c4af61aa68fd294871fdd693680f690ce | ["MIT"] | null | null | null | webapp/apps/carousel/models.py | eb-intl/eb-intl.com | 36f0026c4af61aa68fd294871fdd693680f690ce | ["MIT"] | null | null | null | from __future__ import unicode_literals
from django.db import models
from django.contrib.sites.models import Site
from photologue.models import Photo
class Layer(models.Model):
order = models.IntegerField(default=0)
slug = models.CharField(max_length=512, blank=True, null=True)
name = models.CharField(max_length=512, blank=True, null=True)
description = models.TextField(blank=True, null=True)
image = models.ForeignKey(Photo, related_name='layers')
featured = models.BooleanField(default=False)
def __unicode__(self):
return self.name
class Slide(models.Model):
sites = models.ManyToManyField(Site)
order = models.IntegerField(default=0)
slug = models.CharField(max_length=512, blank=True, null=True)
name = models.CharField(max_length=512, blank=True, null=True)
description = models.TextField(blank=True, null=True)
image = models.ForeignKey(Photo, related_name='slides')
featured = models.BooleanField(default=False)
def __unicode__(self):
return self.name
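# Illustrative usage sketch (not part of the original module): fetching the
# featured slides for the current site, ordered for carousel display:
#
#   slides = Slide.objects.filter(
#       sites=Site.objects.get_current(),
#       featured=True,
#   ).order_by('order')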
| 31.939394 | 66 | 0.739089 | 134 | 1,054 | 5.671642 | 0.320896 | 0.071053 | 0.102632 | 0.134211 | 0.742105 | 0.742105 | 0.742105 | 0.742105 | 0.742105 | 0.742105 | 0 | 0.01573 | 0.155598 | 1,054 | 32 | 67 | 32.9375 | 0.838202 | 0 | 0 | 0.695652 | 0 | 0 | 0.017094 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.173913 | 0.086957 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
e78ecf52fc356a4a50a0abeebacc40ab0393ea20 | 194,913 | py | Python | tests/functional/tests/test_ctia_api.py | CiscoSecurity/tr-05-api-module | ce0f8d583b2fce3aadcc5a5c174a5b2b23e14d72 | ["MIT"] | 10 | 2019-07-16T15:11:05.000Z | 2022-02-07T19:58:55.000Z | tests/functional/tests/test_ctia_api.py | CiscoSecurity/tr-05-api-module | ce0f8d583b2fce3aadcc5a5c174a5b2b23e14d72 | ["MIT"] | 26 | 2019-07-18T09:31:12.000Z | 2021-11-19T09:52:50.000Z | tests/functional/tests/test_ctia_api.py | CiscoSecurity/tr-05-api-module | ce0f8d583b2fce3aadcc5a5c174a5b2b23e14d72 | ["MIT"] | 13 | 2019-07-15T12:31:35.000Z | 2021-02-23T16:57:38.000Z | import pytest
import random
import json
from requests import HTTPError
from ctrlibrary.core.utils import delayed_return
from ctrlibrary.ctia.base import ctia_get_data
from ctrlibrary.ctia.endpoints import (
ACTOR,
ATTACK_PATTERN,
ASSET,
ASSET_MAPPING,
ASSET_PROPERTIES,
CAMPAIGN,
CASEBOOK,
COA,
DATA_TABLE,
FEED,
FEEDBACK,
IDENTITY_ASSERTION,
INCIDENT,
INDICATOR,
INVESTIGATION,
JUDGEMENT,
MALWARE,
RELATIONSHIP,
SIGHTING,
TARGET_RECORD,
TOOL,
VERDICT,
VULNERABILITY,
WEAKNESS,
)
from tests.functional.tests.payloads import (
ACTOR_PAYLOAD,
PUT_ACTOR_PAYLOAD,
SIGHTING_PAYLOAD,
PUT_SIGHTING_PAYLOAD,
INCIDENT_PAYLOAD,
PUT_INCIDENT_PAYLOAD,
ASSET_PAYLOAD,
PUT_ASSET_PAYLOAD,
ASSET_MAPPING_PAYLOAD,
ASSET_PROPERTIES_PAYLOAD,
ATTACK_PATTERN_PAYLOAD,
PUT_ATTACK_PATTERN_PAYLOAD,
CAMPAIGN_PAYLOAD,
PUT_CAMPAIGN_PAYLOAD,
COA_PAYLOAD,
CASEBOOK_PAYLOAD,
CASEBOOK_PATCH_PAYLOAD,
DATA_TABLE_PAYLOAD,
FEED_PAYLOAD,
FEEDBACK_PAYLOAD,
IDENTITY_ASSERTION_PAYLOAD,
PUT_IDENTITY_ASSERTION_PAYLOAD,
INDICATOR_PAYLOAD,
INVESTIGATION_PAYLOAD,
JUDGEMENT_PAYLOAD,
PUT_JUDGEMENT_PAYLOAD,
MALWARE_PAYLOAD,
PUT_MALWARE_PAYLOAD,
RELATIONSHIP_PAYLOAD,
TARGET_RECORD_PAYLOAD,
PUT_TARGET_RECORD_PAYLOAD,
TOOL_PAYLOAD,
PUT_TOOL_PAYLOAD,
VULNERABILITY_PAYLOAD,
WEAKNESS_PAYLOAD
)
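# NOTE: the get_entity and get_entity_response fixtures used throughout are
# assumed to come from the suite's conftest (not shown here): get_entity
# presumably returns a client object for the named CTIA endpoint, while
# get_entity_response creates a test entity, registers it for cleanup, and
# returns the POST response.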
def test_python_module_ctia_positive_actor(
module_headers, get_entity, get_entity_response):
"""Perform testing for actor entity of custom threat intelligence python
module
ID: CCTRI-160-1f5de8b8-11a8-4110-a982-8547a2202789
Steps:
1. Send POST request to create new actor entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
5. Validate that a GET request by external_id returns entities with
the matching external_id
6. Update actor entity using custom python module
7. Repeat GET request using python module and validate that entity was
updated
8. Delete entity from the system
Expected results: Actor entity can be created, fetched, updated and
deleted using custom python module. Data stored in the entity is
the same no matter you access it directly or using our tool
Importance: Critical
"""
actor = get_entity('actor')
# Create new entity using provided payload
actor_post_tool_response = get_entity_response(
'actor', ACTOR_PAYLOAD)
values = {
key: actor_post_tool_response[key] for key in [
'actor_type',
'confidence',
'schema_version',
'source',
'type',
'description',
'short_description',
'title',
'external_ids'
]
}
assert values == ACTOR_PAYLOAD
# Validate that the GET request returns the same data for direct access
# and for access through the custom python module
get_tool_response = actor.get(
actor_post_tool_response['id'].rpartition('/')[-1])
get_direct_response = ctia_get_data(
target_url=ACTOR,
entity_id=actor_post_tool_response['id'].rpartition('/')[-1],
**{'headers': module_headers}
).json()
assert get_tool_response == get_direct_response
# Validate that querying by external_id returns the matching external_ids
external_id_result = actor.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Update entity values
put_tool_response = delayed_return(
actor.put(
id_=actor_post_tool_response['id'],
payload=PUT_ACTOR_PAYLOAD
)
)
assert put_tool_response['source'] == 'new source point'
get_tool_response = actor.get(actor_post_tool_response['id'])
assert get_tool_response['source'] == 'new source point'
def test_python_module_ctia_positive_actor_search(get_entity):
"""Perform testing for actor/search entity of custom threat
intelligence python module
ID: CCTRI-2848 - 9ba48f7c-19b5-45d9-b5f7-7966795c4abe
Steps:
1. Send POST request to create new actor entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
7. Compare the number of entities after creating and deleting entities
Expected results: Actor entity can be created, fetched, counted and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
actor = get_entity('actor')
# Create new entity using provided payload
post_tool_response = actor.post(payload=ACTOR_PAYLOAD,
params={'wait_for': 'true'})
# Extract the entity id for use in endpoint params
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that the GET request returns the same data for direct access
# and for access through the custom python module
get_actor_search = actor.search.get(params={'id': entity_id})
assert get_actor_search[0]['type'] == 'actor'
assert get_actor_search[0]['description'] == 'For Test'
# Count entities after entity created
count_actor_before_deleted = actor.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(actor.search.delete(params={
'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert actor.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_actor_after_deleted = actor.search.count()
# Compare results of count_actor_before_deleted
# and count_actor_after_deleted
assert count_actor_before_deleted !=\
count_actor_after_deleted
def test_python_module_ctia_positive_actor_metric(
get_entity_response, get_entity):
"""Perform testing for actor/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2848 -52c89f1b-9728-41d6-8a1f-07dd0ec8b976
Steps:
1. Send POST request to create new actor entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send GET request to get type of metric/histogram endpoint
4. Send GET request to get type of metric/topn endpoint
5. Send GET request to get type of metric/cardinality endpoint
Expected results: Actor entity can be created, fetched, researched by
the metric endpoints and deleted using custom python module.
Data stored in the entity is the same whether you access it
directly or using our tool.
Importance: Critical
"""
actor = get_entity('actor')
# Create new entity using provided payload
actor_post_tool_response = get_entity_response(
'actor', ACTOR_PAYLOAD)
# Validate that the GET request returns the same data for direct access
# and for access through the custom python module
get_created_actor = actor.get(actor_post_tool_response['id'])
assert get_created_actor['type'] == 'actor'
assert get_created_actor['description'] == 'For Test'
assert get_created_actor['source'] == 'Test source'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_actor['timestamp']
metric_histogram = actor.metric.histogram(params={
'granularity': 'week', 'from': data_from, 'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = actor.metric.topn(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = actor.metric.cardinality(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_asset(
module_headers, get_entity, get_entity_response):
"""Perform testing for asset entity of custom threat intelligence python
module
ID: CCTRI-2848-85594b4a-d53f-4285-9aa8-c13e21858e4b
Steps:
1. Send POST request to create new asset entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
5. Validate that a GET request by external_id returns entities with
the matching external_id
6. Update asset entity using custom python module
7. Repeat GET request using python module and validate that entity was
updated
Expected results: Asset entity can be created, fetched, updated and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
asset = get_entity('asset')
# Create new entity using provided payload
asset_post_tool_response = get_entity_response(
'asset', ASSET_PAYLOAD)
values = {
key: asset_post_tool_response[key] for key in [
'asset_type',
'valid_time',
'schema_version',
'source',
'type',
'description',
'short_description',
'title',
'external_ids'
]
}
assert values == ASSET_PAYLOAD
entity_id = asset_post_tool_response['id'].rpartition('/')[-1]
# Validate that the GET request returns the same data for direct access
# and for access through the custom python module
get_tool_response = asset.get(entity_id)
get_direct_response = ctia_get_data(
target_url=ASSET,
entity_id=entity_id,
**{'headers': module_headers}
).json()
assert get_tool_response == get_direct_response
# Validate that querying by external_id returns the matching external_ids
external_id_result = asset.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Update entity values
put_tool_response = delayed_return(
asset.put(
id_=entity_id,
payload=PUT_ASSET_PAYLOAD
)
)
assert put_tool_response['asset_type'] == 'device'
get_tool_response = asset.get(entity_id)
assert get_tool_response['source'] == 'new source point'
def test_python_module_ctia_positive_asset_search(get_entity):
"""Perform testing for asset/search entity of custom threat
intelligence python module
ID: CCTRI-2848 - 593c7ea1-82f6-4484-beec-9aeecb20b4f3
Steps:
1. Send POST request to create new asset entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
7. Compare the number of entities after creating and deleting entities
Expected results: Asset entity can be created, fetched, counted and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
asset = get_entity('asset')
# Create new entity using provided payload
post_tool_response = asset.post(payload=ASSET_PAYLOAD,
params={'wait_for': 'true'})
values = {
key: post_tool_response[key] for key in [
'asset_type',
'valid_time',
'schema_version',
'source',
'type',
'description',
'short_description',
'title',
'external_ids'
]
}
assert values == ASSET_PAYLOAD
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that the GET request returns the same data for direct access
# and for access through the custom python module
get_asset_search = asset.search.get(params={'id': entity_id})
assert get_asset_search[0]['type'] == 'asset'
assert get_asset_search[0]['description'] == 'For Test'
# Count entities after entity created
count_asset_before_deleted = asset.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(asset.search.delete(params={
'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert asset.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_asset_after_deleted = asset.search.count()
# Compare results of count_asset_before_deleted and
# count_asset_after_deleted
assert count_asset_before_deleted != count_asset_after_deleted
def test_python_module_ctia_positive_asset_metric(
get_entity, get_entity_response):
"""Perform testing for asset/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2848 -a1f492e4-5b8f-483f-8e50-40bb040b394a
Steps:
1. Send POST request to create new asset entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send GET request to get type of metric/histogram endpoint
4. Send GET request to get type of metric/topn endpoint
5. Send GET request to get type of metric/cardinality endpoint
Expected results: Asset entity can be created, fetched, researched by
the metric endpoints and deleted using custom python module.
Data stored in the entity is the same whether you access it
directly or using our tool.
Importance: Critical
"""
asset = get_entity('asset')
asset_post_tool_response = get_entity_response(
'asset', ASSET_PAYLOAD)
# Validate that the GET request returns the same data for direct access
# and for access through the custom python module
get_created_asset = asset.get(asset_post_tool_response['id'])
assert get_created_asset['type'] == 'asset'
assert get_created_asset['description'] == 'For Test'
assert get_created_asset['source'] == 'test source'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_asset['timestamp']
metric_histogram = asset.metric.histogram(params={
'granularity': 'week', 'from': data_from, 'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = asset.metric.topn(params={
'from': data_from, 'aggregate-on': 'asset_type'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = asset.metric.cardinality(params={
'from': data_from, 'aggregate-on': 'asset_type'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_asset_mapping(
module_headers, get_entity, get_entity_response):
"""Perform testing for asset mapping entity of custom threat intelligence
python module
ID: CCTRI-2906 - 9f30e585-2b89-46ba-9a2d-5df8c5b91bdc
Steps:
1. Send POST request to create new asset entity using custom
python module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
5. Send POST request to create new asset_mapping entity using custom
python module
6. Send GET request using custom python module to read just created
entity back.
7. Send same GET request, but using direct access to the server
8. Compare results
9. Validate that a GET request by external_id returns entities with
the matching external_id
10. Update asset mapping entity using custom python module
11. Repeat GET request using python module and validate that entity was
updated
Expected results: Asset mapping entity can be created, fetched, updated and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
# Create new entity using provided payload
asset_post_tool_response = get_entity_response(
'asset', ASSET_PAYLOAD)
entity_id_asset = asset_post_tool_response['id']
asset_mapping = get_entity('asset_mapping')
# Create new asset_mapping entity using provided payload
asset_mapping_post_tool_response = get_entity_response(
'asset_mapping', ASSET_MAPPING_PAYLOAD,
dict(asset_ref=entity_id_asset))
values_asset_mapping = {
key: asset_mapping_post_tool_response[key] for key in [
'asset_type',
'asset_ref',
'confidence',
'stability',
'specificity',
'valid_time',
'schema_version',
'observable',
'source',
'type',
'external_ids'
]
}
assert values_asset_mapping == ASSET_MAPPING_PAYLOAD
entity_id_asset_mapping = \
asset_mapping_post_tool_response['id'].rpartition('/')[-1]
# Validate that the GET request returns the same data for direct access
# and for access through the custom python module
get_asset_mapping_tool_response = asset_mapping.get(entity_id_asset_mapping)
get_direct_response_asset_mapping = ctia_get_data(
target_url=ASSET_MAPPING,
entity_id=entity_id_asset_mapping,
**{'headers': module_headers}
).json()
assert get_asset_mapping_tool_response == get_direct_response_asset_mapping
# Validate that querying by external_id returns the matching external_ids
external_id_result = asset_mapping.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Expire the asset mapping via the expire endpoint
expired_asset_mapping = asset_mapping.expire(
asset_mapping_post_tool_response['id'], payload={})
assert expired_asset_mapping['source'] == 'test source'
# Update asset mapping entity values
put_tool_response = delayed_return(
asset_mapping.put(
id_=asset_mapping_post_tool_response['id'],
payload={
'asset_type': 'device',
'asset_ref': asset_mapping_post_tool_response['asset_ref'],
'confidence': 'Low',
'stability': 'Temporary',
'specificity': 'Medium',
'valid_time': {
"start_time": "2021-07-27T07:55:38.193Z",
"end_time": "2021-07-27T07:55:38.193Z"},
'schema_version': '1.1.3',
'observable': {
'value': '1.1.1.1',
'type': 'ip'
},
'source': 'New test source',
'type': 'asset-mapping'
}
)
)
assert put_tool_response['asset_type'] == 'device'
get_tool_response = asset_mapping.get(entity_id_asset_mapping)
assert get_tool_response['source'] == 'New test source'
assert get_tool_response['asset_type'] == 'device'
assert get_tool_response['confidence'] == 'Low'
assert get_tool_response['stability'] == 'Temporary'
def test_python_module_ctia_positive_asset_mapping_search(
get_entity_response, get_entity):
"""Perform testing for asset mapping/search entity of custom threat
intelligence python module
ID: CCTRI-2906 - 4d46be97-2134-43f7-bb09-cf7ccdb07de8
Steps:
1. Send POST request to create new asset entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send POST request to create new asset mapping entity using custom
python module
4. Send GET request using custom python module to read just created
entity back.
5. Count entities after entity created
6. Delete asset mapping entity from the system
7. Repeat GET request using python module and validate that entity was
deleted
8. Count entities after entity deleted
9. Compare the number of entities after creating and deleting entities
Expected results: Asset mapping entity can be created, fetched, counted and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
asset_post_tool_response = get_entity_response(
'asset', ASSET_PAYLOAD)
asset_ref = asset_post_tool_response['id']
asset_mapping = get_entity('asset_mapping')
# Create new asset_mapping entity using provided payload
payload_values_asset_mapping = {
'asset_type': 'data',
'asset_ref': asset_ref,
'confidence': 'High',
'stability': 'Physical',
'specificity': 'Medium',
'valid_time': {
"start_time": "2021-07-27T07:55:38.193Z",
"end_time": "2021-07-27T07:55:38.193Z"},
'schema_version': asset_post_tool_response['schema_version'],
'observable': {
'value': '1.1.1.1',
'type': 'ip'
},
'source': 'test source',
'type': 'asset-mapping',
'external_ids': ['3']
}
asset_mapping_post_tool_response = asset_mapping.post(
payload=payload_values_asset_mapping, params={'wait_for': 'true'})
entity_id_asset_mapping = \
asset_mapping_post_tool_response['id'].rpartition('/')[-1]
# Validate that the GET request returns the same data for direct access
# and for access through the custom python module
get_asset_mapping_search = asset_mapping.search.get(
params={'id': entity_id_asset_mapping})
assert get_asset_mapping_search[0]['type'] == 'asset-mapping'
assert get_asset_mapping_search[0]['source'] == 'test source'
# Count entities after entity created
count_asset_mapping_before_deleted = asset_mapping.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(asset_mapping.search.delete(
params={'id': entity_id_asset_mapping,
'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert asset_mapping.search.get(params={'id': entity_id_asset_mapping}) ==\
[]
# Count entities after entity deleted
count_asset_mapping_after_deleted = asset_mapping.search.count()
# Compare results of count_asset_mapping_before_deleted and
# count_asset_mapping_after_deleted
assert count_asset_mapping_before_deleted !=\
count_asset_mapping_after_deleted
def test_python_module_ctia_positive_asset_mapping_metric(
get_entity, get_entity_response):
"""Perform testing for asset mapping/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2906 -6113d65c-3587-45b9-a111-f00f98719535
Steps:
1. Send POST request to create new asset entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send POST request to create new asset mapping entity using custom
python module
4. Send GET request using custom python module to read just created
entity back.
5. Send GET request to get type of metric/histogram endpoint
6. Send GET request to get type of metric/topn endpoint
7. Send GET request to get type of metric/cardinality endpoint
Expected results: Asset mapping entity can be created, fetched, researched
by the metric endpoints and deleted using custom python module.
Data stored in the entity is the same whether you access it
directly or using our tool.
Importance: Critical
"""
asset_post_tool_response = get_entity_response(
'asset', ASSET_PAYLOAD)
entity_id_asset = asset_post_tool_response['id']
asset_mapping = get_entity('asset_mapping')
# Create new asset_mapping entity using provided payload
asset_mapping_post_tool_response = get_entity_response(
'asset_mapping', ASSET_MAPPING_PAYLOAD,
dict(asset_ref=entity_id_asset))
entity_id_asset_mapping = \
asset_mapping_post_tool_response['id'].rpartition('/')[-1]
# Validate that the GET request returns the same data for direct access
# and for access through the custom python module
get_created_asset_mapping = asset_mapping.get(entity_id_asset_mapping)
assert get_created_asset_mapping['type'] == 'asset-mapping'
assert get_created_asset_mapping['confidence'] == 'High'
assert get_created_asset_mapping['source'] == 'test source'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_asset_mapping['timestamp']
metric_histogram = asset_mapping.metric.histogram(
params={'granularity': 'week', 'from': data_from,
'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = asset_mapping.metric.topn(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = asset_mapping.metric.cardinality(
params={'from': data_from, 'aggregate-on': 'source'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_asset_properties(
module_headers, get_entity, get_entity_response):
"""Perform testing for asset properties entity of custom threat
intelligence python module
ID: CCTRI-2906 - 17265fc5-3137-4359-a396-81f214984aec
Steps:
1. Send POST request to create new asset entity using custom
python module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
5. Send POST request to create new asset properties entity using custom
python module
6. Send GET request using custom python module to read just created
entity back.
7. Send same GET request, but using direct access to the server
8. Compare results
9. Validate that a GET request by external_id returns entities with
the matching external_id
10. Expire the entity using the expire endpoint
11. Update asset properties entity using custom python module
12. Repeat GET request using python module and validate that entity was
updated
Expected results: Asset properties entity can be created, fetched, updated
and deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
# Create new entity using provided payload
asset_post_tool_response = get_entity_response(
'asset', ASSET_PAYLOAD)
entity_id_asset = asset_post_tool_response['id']
asset_properties = get_entity('asset_properties')
# Create new asset_properties entity using provided payload
asset_properties_post_tool_response = get_entity_response(
'asset_properties', ASSET_PROPERTIES_PAYLOAD,
dict(asset_ref=entity_id_asset))
values_asset_properties = {
key: asset_properties_post_tool_response[key] for key in [
'asset_ref',
'valid_time',
'schema_version',
'source',
'type',
'external_ids'
]
}
assert values_asset_properties == ASSET_PROPERTIES_PAYLOAD
entity_id_asset_properties = \
asset_properties_post_tool_response['id'].rpartition('/')[-1]
# Validate that the GET request returns the same data for direct access
# and for access through the custom python module
get_asset_properties_tool_response = \
asset_properties.get(entity_id_asset_properties)
get_direct_response_asset_properties = ctia_get_data(
target_url=ASSET_PROPERTIES,
entity_id=entity_id_asset_properties,
**{'headers': module_headers}
).json()
assert get_asset_properties_tool_response ==\
get_direct_response_asset_properties
# Validate that querying by external_id returns the matching external_ids
external_id_result = asset_properties.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Expire the asset properties record via the expire endpoint
expired_asset_properties = asset_properties.expire(
entity_id_asset_properties, payload={})
assert expired_asset_properties['source'] == 'test source'
# Update asset properties entity values
put_tool_response = delayed_return(
asset_properties.put(
id_=entity_id_asset_properties,
payload={'asset_ref': entity_id_asset,
'valid_time': {
"start_time": "2021-07-27T07:55:38.193Z",
"end_time": "2021-07-27T07:55:38.193Z"},
'schema_version':
asset_properties_post_tool_response['schema_version'],
'source': 'New test source',
'type': 'asset-properties'
}
)
)
assert put_tool_response['type'] == 'asset-properties'
get_tool_response = asset_properties.get(entity_id_asset_properties)
assert get_tool_response['source'] == 'New test source'
def test_python_module_ctia_positive_asset_properties_search(
get_entity_response, get_entity):
"""Perform testing for asset properties/search entity of custom threat
intelligence python module
ID: CCTRI-2906 - 3246f737-e33d-4e60-b21f-3a85c28eddcf
Steps:
1. Send POST request to create new asset entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send POST request to create new asset properties entity using custom
python module
4. Send GET request using custom python module to read just created
entity back.
5. Count entities after entity created
6. Delete asset properties entity from the system
7. Repeat GET request using python module and validate that entity was
deleted
8. Count entities after entity deleted
9. Compare the number of entities after creating and deleting entities
Expected results: Asset properties entity can be created, fetched, counted
and deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
asset_post_tool_response = get_entity_response(
'asset', ASSET_PAYLOAD)
asset_ref = asset_post_tool_response['id']
asset_properties = get_entity('asset_properties')
# Create new asset properties entity using provided payload
payload_values_asset_properties = {
'asset_ref': asset_ref,
'valid_time': {
"start_time": "2021-07-27T07:55:38.193Z",
"end_time": "2021-07-27T07:55:38.193Z"},
'schema_version': asset_post_tool_response['schema_version'],
'source': 'test source',
'type': 'asset-properties',
'external_ids': ['3']
}
asset_properties_post_tool_response = asset_properties.post(
payload=payload_values_asset_properties, params={'wait_for': 'true'})
entity_id_asset_properties = \
asset_properties_post_tool_response['id'].rpartition('/')[-1]
# Validate that the GET request returns the same data for direct access
# and for access through the custom python module
get_asset_properties_search = asset_properties.search.get(
params={'id': entity_id_asset_properties})
assert get_asset_properties_search[0]['type'] == 'asset-properties'
assert get_asset_properties_search[0]['source'] == 'test source'
# Count entities after entity created
count_asset_properties_before_deleted = asset_properties.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(asset_properties.search.delete(
params={'id': entity_id_asset_properties,
'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert asset_properties.search.get(
params={'id': entity_id_asset_properties}) == []
# Count entities after entity deleted
count_asset_properties_after_deleted = asset_properties.search.count()
# Compare results of count_asset_properties_before_deleted and
# count_asset_properties_after_deleted
assert count_asset_properties_before_deleted != \
count_asset_properties_after_deleted
def test_python_module_ctia_positive_asset_properties_metric(
get_entity, get_entity_response):
"""Perform testing for asset properties/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2906 -b3c835e4-4c5d-4d5d-95f6-45d3d7e350c3
Steps:
1. Send POST request to create new asset entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send POST request to create new asset properties entity using custom
python module
4. Send GET request using custom python module to read just created
entity back.
5. Send GET request to get type of metric/histogram endpoint
6. Send GET request to get type of metric/topn endpoint
7. Send GET request to get type of metric/cardinality endpoint
8. Delete created entity
9. Repeat GET request using python module and validate that entity was
deleted
Expected results: Asset properties entity can be created, fetched,
researched by the metric endpoints and deleted using custom python module.
Data stored in the entity is the same whether you access it
directly or using our tool.
Importance: Critical
"""
asset_post_tool_response = get_entity_response(
'asset', ASSET_PAYLOAD)
asset_ref = asset_post_tool_response['id']
asset_properties = get_entity('asset_properties')
# Create new asset properties entity using provided payload
asset_properties_post_tool_response = get_entity_response(
'asset_properties', ASSET_PROPERTIES_PAYLOAD,
dict(asset_ref=asset_ref))
entity_id_asset_properties = \
asset_properties_post_tool_response['id'].rpartition('/')[-1]
# Validate that the GET request returns the same data for direct access
# and for access through the custom python module
get_created_asset_properties = \
asset_properties.get(entity_id_asset_properties)
assert get_created_asset_properties['type'] == 'asset-properties'
assert get_created_asset_properties['source'] == 'test source'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_asset_properties['timestamp']
metric_histogram = asset_properties.metric.histogram(
params={'granularity': 'week', 'from': data_from,
'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = asset_properties.metric.topn(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = asset_properties.metric.cardinality(
params={'from': data_from, 'aggregate-on': 'source'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_attack_pattern(
module_headers, get_entity, get_entity_response):
"""Perform testing for attack pattern entity of custom threat intelligence
python module
ID: CCTRI-160-86d8f8ef-fbf4-4bf4-88c2-a57f4fe6b866
Steps:
1. Send POST request to create new attack pattern entity using custom
python module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
5. Validate that a GET request by external_id returns entities with
the matching external_id
6. Update attack pattern entity using custom python module
7. Repeat GET request using python module and validate that entity was
updated
Expected results: Attack pattern entity can be created, fetched, updated
and deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
attack_pattern = get_entity('attack_pattern')
attack_pattern_post_tool_response = get_entity_response(
'attack_pattern', ATTACK_PATTERN_PAYLOAD)
values = {
key: attack_pattern_post_tool_response[key] for key in [
'description',
'schema_version',
'type',
'short_description',
'source',
'title',
'external_ids'
]
}
assert values == ATTACK_PATTERN_PAYLOAD
entity_id = attack_pattern_post_tool_response['id'].rpartition('/')[-1]
# Validate that the GET request returns the same data for direct access
# and for access through the custom python module
get_tool_response = attack_pattern.get(entity_id)
get_direct_response = ctia_get_data(
target_url=ATTACK_PATTERN,
entity_id=entity_id,
**{'headers': module_headers}
).json()
assert get_tool_response == get_direct_response
# Validate that querying by external_id returns the matching external_ids
external_id_result = attack_pattern.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Update entity values
put_tool_response = delayed_return(
attack_pattern.put(
id_=entity_id,
payload=PUT_ATTACK_PATTERN_PAYLOAD
)
)
assert put_tool_response['short_description'] == 'Updated descr'
get_tool_response = attack_pattern.get(entity_id)
assert get_tool_response['short_description'] == 'Updated descr'
def test_python_module_ctia_positive_attack_pattern_search(get_entity):
"""Perform testing for attack_pattern/search entity of custom threat
intelligence python module
ID: CCTRI-2848 - 642bcca5-3eec-4955-b395-e4c365b65bf5
Steps:
1. Send POST request to create new attack_pattern entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
7. Compare the number of entities after creating and deleting entities
Expected results: Attack_pattern entity can be created, fetched, counted
and deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
attack_pattern = get_entity('attack_pattern')
payload = {
'description': (
'A boot kit is a malware variant that modifies the boot sectors of'
' a hard drive'
),
'schema_version': '1.1.3',
'type': 'attack-pattern',
'short_description': 'desc for test',
'source': 'new source point',
'title': 'for test'
}
# Create new entity using provided payload
post_tool_response = attack_pattern.post(payload=payload,
params={'wait_for': 'true'})
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that the GET request returns the same data for direct access
# and for access through the custom python module
get_attack_pattern_search = attack_pattern.search.get(
params={'id': entity_id})
assert get_attack_pattern_search[0]['type'] == 'attack-pattern'
assert get_attack_pattern_search[0]['schema_version'] == '1.1.3'
# Count entities after entity created
count_attack_pattern_before_deleted = attack_pattern.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(attack_pattern.search.delete(params={
'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert attack_pattern.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_attack_pattern_after_deleted = attack_pattern.search.count()
# Compare results of count_attack_pattern_before_deleted
# and count_attack_pattern_after_deleted
assert count_attack_pattern_before_deleted !=\
count_attack_pattern_after_deleted
def test_python_module_ctia_positive_attack_pattern_metric(
get_entity, get_entity_response):
"""Perform testing for attack_pattern/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2848 -1b6c327c-cf55-4e22-a72c-93f9ad4b2763
Steps:
1. Send POST request to create new attack_pattern entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Send GET request to get type of metric/histogram endpoint
4. Send GET request to get type of metric/topn endpoint
5. Send GET request to get type of metric/cardinality endpoint
Expected results: Attack_pattern entity can be created, fetched,
queried via the metric endpoints and deleted using custom python module.
Data stored in the entity is the same whether you access it
directly or through our tool.
Importance: Critical
"""
attack_pattern = get_entity('attack_pattern')
attack_pattern_post_tool_response = get_entity_response(
'attack_pattern', ATTACK_PATTERN_PAYLOAD)
entity_id = attack_pattern_post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns data of the created entity
get_created_attack_pattern = attack_pattern.get(entity_id)
assert get_created_attack_pattern['type'] == 'attack-pattern'
assert get_created_attack_pattern['schema_version'] == '1.1.3'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_attack_pattern['timestamp']
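# Histogram buckets entities on the 'timestamp' field with weekly
# granularity, starting from the creation time captured above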
metric_histogram = attack_pattern.metric.histogram(params={
'granularity': 'week', 'from': data_from, 'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = attack_pattern.metric.topn(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = attack_pattern.metric.cardinality(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_bulk(module_headers, get_entity):
"""Perform testing for bulk functionality of custom threat intelligence
python module
ID: CCTRI-165-7db40d60-9767-47d2-98a5-e734562fa9f1
Steps:
1. Send POST request to create one campaign entity and one coa entity
in a bulk using custom python module
2. Send GET request using custom python module and bulk functionality
to read just created entities back.
3. Validate response
4. Send GET request, but using usual single entity endpoint with custom
python module
5. Send same GET request, but with direct access to the server
6. Compare results
Expected results: Bulk functionality works properly and several entities
can be created at the same time using custom python module
Importance: Critical
"""
bulk = get_entity('bulk')
campaign = get_entity('campaign')
# Create Campaign and COA entities in bulk
post_tool_response = delayed_return(
bulk.post({
"coas": [COA_PAYLOAD], "campaigns": [CAMPAIGN_PAYLOAD]},
)
)
assert len(post_tool_response['campaigns']) > 0
assert len(post_tool_response['coas']) > 0
campaign_entity_id = post_tool_response['campaigns'][0].rpartition('/')[-1]
# Verify that GET request using bulk functionality returns valid data
get_tool_response = bulk.get(params={'campaigns': [campaign_entity_id]})
values = {
key: get_tool_response['campaigns'][0][key] for key in [
'campaign_type',
'confidence',
'type',
'schema_version',
'description',
'short_description',
'title'
]
}
assert values == CAMPAIGN_PAYLOAD
# Validate that GET request returns same data for direct access and access
# through custom python module for entity that was created using bulk
# functionality
get_tool_response = campaign.get(campaign_entity_id)
get_direct_response = ctia_get_data(
target_url=CAMPAIGN,
entity_id=campaign_entity_id,
headers=module_headers
).json()
assert get_tool_response == get_direct_response
def test_python_module_ctia_positive_bundle(
module_headers, get_entity, get_entity_response):
"""Perform testing for bundle functionality of custom threat intelligence
python module
ID: CCTRI-172-f483fa82-f308-4606-9045-ffc2dc8b41f0
Steps:
1. Send POST request to create one incident entity to be used for
bundle functionality
2. Send POST request to create one indicator entity to be used for
bundle functionality
3. Send POST request to export data using bundle functionality
4. Send POST request to import data using bundle functionality
Expected results: Bundle functionality works properly and multiple
entities can be imported or exported using custom python module
Importance: Critical
"""
# Prepare data for incident
incident_post_tool_response =\
get_entity_response('incident', INCIDENT_PAYLOAD)
# Create new indicator using provided payload
indicator_post_tool_response =\
get_entity_response('indicator', INDICATOR_PAYLOAD)
# Use created entities for bundle
bundle = get_entity('bundle')
payload = {
'ids': [
incident_post_tool_response['id'],
indicator_post_tool_response['id']
]
}
# Validate export endpoint
post_tool_response = bundle.export.post(payload=payload)
assert post_tool_response['type'] == 'bundle'
assert post_tool_response['source'] == 'ctia'
assert post_tool_response['incidents'][0]['id'] == (
incident_post_tool_response['id']
)
assert post_tool_response['indicators'][0]['id'] == (
indicator_post_tool_response['id']
)
# Validate import endpoint
payload = {
'schema_version': indicator_post_tool_response['schema_version'],
'type': 'bundle',
'source': 'random source',
}
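# A minimal bundle payload is enough for the import endpoint; the response
# is only checked for truthiness below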
post_tool_response = bundle.import_.post(payload=payload)
assert post_tool_response
def test_python_module_ctia_positive_campaign(
module_headers, get_entity, get_entity_response):
"""Perform testing for campaign entity of custom threat intelligence python
module
ID: CCTRI-161-0bb11c77-5b26-43cb-841a-b18f0fa0563c
Steps:
1. Send POST request to create new campaign entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
5. Update campaign entity using custom python module
6. Repeat GET request using python module and validate that entity was
updated
7. Send SEARCH request using custom python module to find entity and
validate proper values are returned
Expected results: Campaign entity can be created, fetched, updated,
searched and deleted using custom python module. Data stored in the
entity is the same whether you access it directly or through our tool
Importance: Critical
"""
campaign = get_entity('campaign')
campaign_post_tool_response = get_entity_response(
'campaign', CAMPAIGN_PAYLOAD)
values = {
key: campaign_post_tool_response[key] for key in [
'title',
'campaign_type',
'confidence',
'type',
'schema_version',
'description',
'short_description'
]
}
assert values == CAMPAIGN_PAYLOAD
entity_id = campaign_post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns same data for direct access and access
# through custom python module
get_tool_response = campaign.get(entity_id)
get_direct_response = ctia_get_data(
target_url=CAMPAIGN,
entity_id=entity_id,
headers=module_headers
).json()
assert get_tool_response == get_direct_response
# Update entity values
put_tool_response = delayed_return(
campaign.put(
id_=entity_id,
payload=PUT_CAMPAIGN_PAYLOAD
)
)
assert put_tool_response['title'] == 'New demo campaign'
get_tool_response = campaign.get(entity_id)
assert get_tool_response['title'] == 'New demo campaign'
# Search for campaign by entity id
search_tool_response = campaign.search.get(params={
'query': 'id:*{}'.format(entity_id)})
# We got exactly one entry for provided unique entity id
assert len(search_tool_response) == 1
assert search_tool_response[0]['title'] == 'New demo campaign'
def test_python_module_ctia_positive_campaign_search(get_entity):
"""Perform testing for campaign/search entity of custom threat
intelligence python module
ID: CCTRI-2848 - b65fb933-d81b-4189-abbb-849fc2deef06
Steps:
1. Send POST request to create new campaign entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
7. Compare the number of entities after creating and deleting entities
Expected results: Campaign entity can be created, fetched, counted and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or through our tool
Importance: Critical
"""
campaign = get_entity('campaign')
# Create new entity using provided payload
post_tool_response = campaign.post(payload=CAMPAIGN_PAYLOAD,
params={'wait_for': 'true'})
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that search GET request returns data of the created entity
get_campaign_search = campaign.search.get(
params={'id': entity_id})
assert get_campaign_search[0]['type'] == 'campaign'
assert get_campaign_search[0]['schema_version'] == '1.1.3'
# Count entities after entity created
count_campaign_before_deleted = campaign.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(campaign.search.delete(params={
'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert campaign.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_campaign_after_deleted = campaign.search.count()
# Compare results of count_campaign_before_deleted
# and count_campaign_after_deleted
assert count_campaign_before_deleted != count_campaign_after_deleted
def test_python_module_ctia_positive_campaign_metric(
get_entity, get_entity_response):
"""Perform testing for campaign/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2848 - b11cbee0-a3e5-4a19-8b4a-d3d16e7bfb5c
Steps:
1. Send POST request to create new campaign entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Send GET request to get type of metric/histogram endpoint
4. Send GET request to get type of metric/topn endpoint
5. Send GET request to get type of metric/cardinality endpoint
6. Delete created entity
7. Repeat GET request using python module and validate that entity was
deleted
Expected results: Campaign entity can be created, fetched,
queried via the metric endpoints and deleted using custom python module.
Data stored in the entity is the same whether you access it
directly or through our tool.
Importance: Critical
"""
campaign = get_entity('campaign')
post_tool_response = get_entity_response('campaign', CAMPAIGN_PAYLOAD)
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns data of the created entity
get_created_campaign = campaign.get(entity_id)
assert get_created_campaign['type'] == 'campaign'
assert get_created_campaign['schema_version'] == '1.1.3'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_campaign['timestamp']
metric_histogram = campaign.metric.histogram(params={
'granularity': 'week', 'from': data_from, 'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = campaign.metric.topn(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = campaign.metric.cardinality(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_casebook(
module_headers, get_entity, get_entity_response):
"""Perform testing for casebook entity of custom threat intelligence python
module
ID: CCTRI-165-d6fb1e17-324f-4de8-a388-2d6ab33dd071
Steps:
1. Send POST request to create new casebook entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Validate that GET request by external_id returns matching
entities
5. Compare results
6. Add new observable entity to the casebook
7. Send GET request to validate that observable was actually added
8. Validate that POST request of casebook.texts returns created
text and type
9. Update casebook entity using custom python module
10. Repeat GET request using python module and validate that entity was
updated
11. Use Patch endpoint for updating updated entity
Expected results: Casebook entity can be created, fetched, updated and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or through our tool
Importance: Critical
"""
# Create new entity using provided payload
casebook = get_entity('casebook')
casebook_post_tool_response = get_entity_response(
'casebook', CASEBOOK_PAYLOAD)
values = {
key: casebook_post_tool_response[key] for key in [
'type',
'title',
'short_description',
'description',
'observables',
'timestamp',
'external_ids'
]
}
assert values == CASEBOOK_PAYLOAD
entity_id = casebook_post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns same data for direct access and access
# through custom python module
get_tool_response = casebook.get(entity_id)
get_direct_response = ctia_get_data(
target_url=CASEBOOK,
entity_id=entity_id,
headers=module_headers
).json()
assert get_tool_response == get_direct_response
# Add one observable to casebook using special endpoint for this purpose
delayed_return(
casebook.observables(
entity_id,
{
'operation': 'add',
'observables': casebook_post_tool_response['observables']
}
)
)
get_tool_response = casebook.get(entity_id)
assert get_tool_response['observables'][0] ==\
casebook_post_tool_response['observables'][0]
# Validate that GET request by external_id returns matching entities
external_id_result = casebook.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Validate that POST request of casebook.texts returns created
# text and type
payload_for_texts = {
"operation": "remove",
"texts": [
{
"type": "test type",
"text": "test text"
}
]
}
added_texts_data = casebook.texts(entity_id, payload=payload_for_texts)
assert added_texts_data['texts'][0]['type'] == 'test type'
assert added_texts_data['texts'][0]['text'] == 'test text'
# Update entity values
put_tool_response = delayed_return(
casebook.put(
id_=entity_id,
payload={'short_description': 'Updated description'}
)
)
assert put_tool_response['short_description'] == 'Updated description'
get_tool_response = casebook.get(entity_id)
assert get_tool_response['short_description'] == 'Updated description'
# Use Patch endpoint for updating updated entity
patch_tool_response = casebook.patch(entity_id,
payload=CASEBOOK_PATCH_PAYLOAD,
params={'wait_for': 'true'})
assert patch_tool_response['short_description'] == 'Patched Casebook'
assert patch_tool_response['description'] == 'Patched entity'
assert patch_tool_response['title'] == 'Case November, 2021 0:00 PM'
def test_python_module_ctia_positive_casebook_bundle(
module_headers, get_entity, get_entity_response):
Perform testing for casebook bundle functionality of custom threat
intelligence python module
ID: CCTRI-2968 - 11e8a791-5496-4831-af75-1823fb572e02
Steps:
1. Send POST request to create new casebook entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Validate that GET request by external_id returns matching
entities
5. Send POST request to create casebook bundle entity using custom
python module
6. Delete casebook entity from the system
Expected results: Casebook bundle entity can be created and deleted using
custom python module. Data stored in the entity is the same whether
you access it directly or through our tool
Importance: Critical
"""
casebook = get_entity('casebook')
# Create new casebook entity using provided payload
casebook_post_tool_response = get_entity_response(
'casebook', CASEBOOK_PAYLOAD)
values = {
key: casebook_post_tool_response[key] for key in [
'type',
'title',
'short_description',
'description',
'observables',
'timestamp',
'external_ids'
]
}
assert values == CASEBOOK_PAYLOAD
entity_id = casebook_post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns same data for direct access and access
# through custom python module
get_tool_response = casebook.get(entity_id)
get_direct_response = ctia_get_data(
target_url=CASEBOOK,
entity_id=entity_id,
headers=module_headers
).json()
assert get_tool_response == get_direct_response
payload_for_bundle = {
"operation": "add",
"bundle": {
"description": "string",
"valid_time": {
"start_time": "2021-08-26T11:48:51.490Z",
"end_time": "2021-08-26T11:48:51.490Z"
},
"schema_version": "1.1.3",
"type": "bundle",
"source": "Source For bundle",
"short_description": "Bundle description",
"title": "Title for test",
"id": casebook_post_tool_response['id']
}
}
bundle_tool_response = casebook.bundle(entity_id,
payload=payload_for_bundle)
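# The asserts below expect the updated casebook back, with fields from
# the original CASEBOOK_PAYLOAD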
assert bundle_tool_response['description'] ==\
'New Casebook for malicious tickets'
assert bundle_tool_response['schema_version'] == '1.1.3'
assert bundle_tool_response['type'] == 'casebook'
def test_python_module_ctia_positive_casebook_search(get_entity):
"""Perform testing for casebook/search entity of custom threat
intelligence python module
ID: CCTRI-2848 - 90719039-6d18-49cf-87fb-739e695be1fd
Steps:
1. Send POST request to create new casebook entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
7. Compare the number of entities after creating and deleting entities
Expected results: Casebook entity can be created, fetched, counted and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or through our tool
Importance: Critical
"""
casebook = get_entity('casebook')
observable = [{'value': 'instanbul.com', 'type': 'domain'}]
payload = {
'type': 'casebook',
'title': 'Case September 24, 2019 2:34 PM',
'short_description': 'New Casebook',
'description': 'New Casebook for malicious tickets',
'observables': observable,
'timestamp': '2019-09-24T11:34:18.000Z'
}
# Create new entity using provided payload
post_tool_response = casebook.post(payload=payload,
params={'wait_for': 'true'})
values = {
key: post_tool_response[key] for key in [
'type',
'title',
'short_description',
'description',
'observables',
'timestamp'
]
}
assert values == payload
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that search GET request returns data of the created entity
get_casebook_search = casebook.search.get(
params={'id': entity_id})
assert get_casebook_search[0]['type'] == 'casebook'
assert get_casebook_search[0]['schema_version'] == '1.1.3'
# Count entities after entity created
count_casebook_before_deleted = casebook.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(casebook.search.delete(params={
'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert casebook.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_casebook_after_deleted = casebook.search.count()
# Compare results of count_casebook_before_deleted
# and count_casebook_after_deleted
assert count_casebook_before_deleted != count_casebook_after_deleted
def test_python_module_ctia_positive_casebook_metric(
get_entity, get_entity_response):
"""Perform testing for casebook/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2848 - e5f86888-5cab-4048-ae5a-92220db88497
Steps:
1. Send POST request to create new casebook entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Send GET request to get type of metric/histogram endpoint
4. Send GET request to get type of metric/topn endpoint
5. Send GET request to get type of metric/cardinality endpoint
Expected results: Casebook entity can be created, fetched,
queried via the metric endpoints and deleted using custom python module.
Data stored in the entity is the same whether you access it
directly or through our tool.
Importance: Critical
"""
casebook = get_entity('casebook')
casebook_post_tool_response = get_entity_response(
'casebook', CASEBOOK_PAYLOAD)
entity_id = casebook_post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns data of the created entity
get_created_casebook = casebook.get(entity_id)
assert get_created_casebook['type'] == 'casebook'
assert get_created_casebook['schema_version'] == '1.1.3'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_casebook['timestamp']
metric_histogram = casebook.metric.histogram(params={
'granularity': 'week', 'from': data_from, 'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = casebook.metric.topn(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = casebook.metric.cardinality(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_coa(
module_headers, get_entity, get_entity_response):
"""Perform testing for coa entity of custom threat intelligence python
module
ID: CCTRI-161-03b73a5e-b919-4e94-8828-c388e1ba211e
Steps:
1. Send POST request to create new coa entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
5. Validate that GET request by external_id returns matching
entities
6. Update coa entity using custom python module
7. Repeat GET request using python module and validate that entity was
updated
Expected results: COA entity can be created, fetched, updated and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or through our tool
Importance: Critical
"""
coa = get_entity('coa')
coa_post_tool_response = get_entity_response('coa', COA_PAYLOAD)
values = {
key: coa_post_tool_response[key] for key in [
'description',
'coa_type',
'type',
'schema_version',
'short_description',
'title',
'external_ids'
]
}
assert values == COA_PAYLOAD
entity_id = coa_post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns same data for direct access and access
# through custom python module
get_tool_response = coa.get(entity_id)
get_direct_response = ctia_get_data(
target_url=COA,
entity_id=entity_id,
headers=module_headers
).json()
assert get_tool_response == get_direct_response
# Validate that GET request by external_id returns matching entities
external_id_result = coa.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Update entity values
put_tool_response = delayed_return(
coa.put(
id_=entity_id,
payload={'description': 'New COA description'}
)
)
assert put_tool_response['description'] == 'New COA description'
get_tool_response = coa.get(entity_id)
assert get_tool_response['description'] == 'New COA description'
def test_python_module_ctia_positive_coa_search(get_entity):
"""Perform testing for coa/search entity of custom threat
intelligence python module
ID: CCTRI-2848 - 5bd4220c-f91f-407d-9b3b-c436d8dc5c3f
Steps:
1. Send POST request to create new coa entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
7. Compare the number of entities after creating and deleting entities
Expected results: COA entity can be created, fetched, counted and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or through our tool
Importance: Critical
"""
coa = get_entity('coa')
# Create new entity using provided payload
coa_post_tool_response = coa.post(payload=COA_PAYLOAD,
params={'wait_for': 'true'})
entity_id = coa_post_tool_response['id'].rpartition('/')[-1]
# Validate that search GET request returns data of the created entity
get_coa_search = coa.search.get(
params={'id': entity_id})
assert get_coa_search[0]['type'] == 'coa'
assert get_coa_search[0]['schema_version'] == '1.1.3'
# Count entities after entity created
count_coa_before_deleted = coa.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(coa.search.delete(params={
'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert coa.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_coa_after_deleted = coa.search.count()
# Compare results of count_coa_before_deleted
# and count_coa_after_deleted
assert count_coa_before_deleted != count_coa_after_deleted
def test_python_module_ctia_positive_coa_metric(
get_entity, get_entity_response):
"""Perform testing for coa/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2848 - 73e26197-527f-437b-9ad8-eb5cd34761ed
Steps:
1. Send POST request to create new coa entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Send GET request to get type of metric/histogram endpoint
4. Send GET request to get type of metric/topn endpoint
5. Send GET request to get type of metric/cardinality endpoint
Expected results: COA entity can be created, fetched,
queried via the metric endpoints and deleted using custom python module.
Data stored in the entity is the same whether you access it
directly or through our tool.
Importance: Critical
"""
coa = get_entity('coa')
post_tool_response = get_entity_response('coa', COA_PAYLOAD)
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns data of the created entity
get_created_coa = coa.get(entity_id)
assert get_created_coa['type'] == 'coa'
assert get_created_coa['schema_version'] == '1.1.3'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_coa['timestamp']
metric_histogram = coa.metric.histogram(params={
'granularity': 'week', 'from': data_from, 'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = coa.metric.topn(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = coa.metric.cardinality(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_data_table(
module_headers, get_entity, get_entity_response):
"""Perform testing for data table entity of custom threat intelligence
python module
ID: CCTRI-161-c89f865b-c070-446f-a052-8fae73c4d564
Steps:
1. Send POST request to create new data table entity using custom
python module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
Expected results: Data table entity can be created, fetched and deleted
using custom python module. Data stored in the entity is the same
whether you access it directly or through our tool
Importance: Critical
"""
data_table = get_entity('data_table')
# Create new entity using provided payload
post_tool_response = get_entity_response('data_table', DATA_TABLE_PAYLOAD)
values = {
key: post_tool_response[key] for key in [
'columns',
'rows',
'type',
'schema_version'
]
}
assert values == DATA_TABLE_PAYLOAD
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns same data for direct access and access
# through custom python module
get_tool_response = data_table.get(entity_id)
get_direct_response = ctia_get_data(
target_url=DATA_TABLE,
entity_id=entity_id,
headers=module_headers
).json()
assert get_tool_response == get_direct_response
def test_python_module_ctia_positive_event(get_entity):
"""Perform testing for event entity of custom threat intelligence python
module
ID: CCTRI-162-b3ecaf2b-7d15-43a5-80bb-879f4a2ce34b
Steps:
1. Send SEARCH request to server to get random event entity id
2. Send GET request to server using that id
3. Validate returned data contains information about event
Expected results: Requests are sent successfully and a valid response
is received from the server
Importance: Critical
"""
event = get_entity('event')
entities_list = event.search.get(params={'query': '*'})
assert len(entities_list) > 0
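# Pick a random event from the search results and read it back by id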
entity = random.choice(entities_list)
assert entity['type'] == 'event'
get_tool_response = event.get(entity['id'].rpartition('/')[-1])
assert get_tool_response['type'] == 'event'
assert get_tool_response['timestamp']
def test_python_module_ctia_positive_event_search(get_entity):
"""Perform testing for event/search entity of custom threat
intelligence python module
ID: CCTRI-2906 - 363a43d4-1862-4eed-aecb-3d011804642d
Steps:
1. Send GET request using custom python module to read entities.
2. Count entities after entity created
3. Delete entity from the system
4. Count entities after entity deleted
5. Compare the number of entities after creating and deleting entities
6. Send GET request using custom python module to read entities by id.
7. Delete entity from the system using id of event
Expected results: Event entity can be fetched and counted using custom
python module. Event entities cannot be deleted. Data stored in the
entity is the same whether you access it directly or through our tool
Importance: Critical
"""
event = get_entity('event')
# Validate that search GET request returns event entities
event_search = event.search.get()
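# At least two events are assumed to exist, since the second result is used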
assert event_search[1]['type'] == 'event'
entity_id = event_search[1]['id']
# Count entities after entity created
count_event_before_deleted = event.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
deleting_response = None
try:
event.search.delete(params={
'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true',
'wait_for': 'true'})
except HTTPError as error:
deleting_response = error
assert deleting_response is not None, 'Expected HTTPError was not raised'
assert deleting_response.response.status_code == 403
json_string = deleting_response.response.text
parsed_text_response = json.loads(json_string)
assert parsed_text_response['message'] == 'Missing capability'
assert parsed_text_response['error'] == 'missing_capability'
assert parsed_text_response['capabilities'][0] == 'search-event'
assert parsed_text_response['capabilities'][1] == 'developer'
assert parsed_text_response['capabilities'][2] == 'delete-event'
# Count entities after entity deleted
count_event_after_deleted = event.search.count()
# Compare results of count_event_before_deleted
# and count_event_after_deleted
assert count_event_before_deleted == count_event_after_deleted
# Validate that GET request returns event data by id
event_search_by_id = event.get(entity_id)
assert event_search_by_id['type'] == 'event'
assert event_search_by_id['id'] == entity_id
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
by_id_deleting_response = None
try:
event.delete(entity_id)
except HTTPError as error:
by_id_deleting_response = error
assert by_id_deleting_response is not None, \
'Expected HTTPError was not raised'
assert by_id_deleting_response.response.status_code == 403
json_string = by_id_deleting_response.response.text
parsed_text_response = json.loads(json_string)
assert parsed_text_response['message'] == 'Missing capability'
assert parsed_text_response['error'] == 'missing_capability'
assert parsed_text_response['capabilities'][0] == 'developer'
assert parsed_text_response['capabilities'][1] == 'delete-event'
def test_python_module_ctia_positive_feed(
module_headers, get_entity, get_entity_response):
"""Perform testing for feed entity of custom threat intelligence python
module
ID: CCTRI-906-e0114e1d-bfad-4776-810c-66ca351027d7
Steps:
1. Send POST request to create one judgement entity with one observable
2. Send POST request to create one indicator entity to be used for
feed functionality
3. Send POST request to create new relationship between judgement and
indicator
4. Send POST request to create new feed entity using custom python
module
5. Send GET request using custom python module to read just created
entity back.
6. Send same GET request, but using direct access to the server
7. Compare results
8. Update feed entity using custom python module
9. Repeat GET request using python module and validate that entity was
updated
10. Send GET request using custom python module to read view endpoint
11. Send GET request using custom python module to read view txt
endpoint
12. Delete entity from the system
Expected results: Feed entity can be created, fetched, updated and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or through our tool
Importance: Critical
"""
judgement_post_tool_response = get_entity_response(
'judgement', JUDGEMENT_PAYLOAD)
# Prepare data for indicator
indicator_post_tool_response = get_entity_response(
'indicator', INDICATOR_PAYLOAD)
# Use created entities for relationship
relationship_post_tool_response = get_entity_response(
'relationship', RELATIONSHIP_PAYLOAD,
dict(source_ref=judgement_post_tool_response['id'],
target_ref=indicator_post_tool_response['id']))
assert relationship_post_tool_response['type'] == 'relationship'
assert relationship_post_tool_response['description'] == 'Test relation'
feed = get_entity('feed')
feed_post_tool_response = get_entity_response(
'feed', FEED_PAYLOAD,
dict(indicator_id=indicator_post_tool_response['id']))
# Validate that created feed entity contains provided payload values
values = {
key: feed_post_tool_response[key] for key in [
'schema_version',
'revision',
'output',
'type',
'feed_type',
'indicator_id',
'external_ids'
]
}
assert values == FEED_PAYLOAD
entity_id = feed_post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns same data for direct access and access
# through custom python module
get_tool_response = feed.get(entity_id)
get_direct_response = ctia_get_data(
target_url=FEED,
entity_id=entity_id,
headers=module_headers
).json()
assert get_tool_response == get_direct_response
# Validate that GET request by external_id returns matching entities
external_id_result = feed.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Update entity values
put_tool_response = delayed_return(
feed.put(
id_=entity_id,
payload={
"revision": 1,
"indicator_id": indicator_post_tool_response['id'],
"type": "feed",
"output": "observables",
"feed_type": "indicator",
}
)
)
assert put_tool_response['revision'] == 1
get_tool_response = feed.get(entity_id)
assert get_tool_response['revision'] == 1
# Get information from feed view endpoint
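# The 'secret' returned with the feed is required to access its view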
assert feed.view(entity_id, get_tool_response['secret']) == (
{'observables': [judgement_post_tool_response['observable']]}
)
# Get information from feed view text endpoint
assert feed.view.txt(entity_id, get_tool_response['secret']) ==\
judgement_post_tool_response['observable']['value']
def test_python_module_ctia_positive_feed_search(get_entity):
"""Perform testing for feed/search entity of custom threat
intelligence python module
ID: CCTRI-2885 - 8813aaa2-43fc-430d-ab1b-6eb40c2a9394
Steps:
1. Send POST request to create new feed entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
7. Compare the number of entities after creating and deleting entities
Expected results: Feed entity can be created, fetched,
counted and deleted using custom python module. Data stored in the entity
is the same whether you access it directly or through our tool
Importance: Critical
"""
feed = get_entity('feed')
# Create new entity using provided payload
post_tool_response = feed.post(
payload=FEED_PAYLOAD, params={'wait_for': 'true'})
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that search GET request returns data of the created entity
get_feed_search = feed.search.get(
params={'id': entity_id})
assert get_feed_search[0]['type'] == 'feed'
assert get_feed_search[0]['schema_version'] == '1.1.3'
# Count entities after entity created
count_feed_before_deleted = feed.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(feed.search.delete(
params={'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert feed.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_feed_after_deleted = feed.search.count()
# Compare results of count_feed_before_deleted
# and count_feed_after_deleted
assert count_feed_before_deleted != count_feed_after_deleted
def test_python_module_ctia_positive_feedback(
module_headers, get_entity, get_entity_response):
"""Perform testing for feedback entity of custom threat intelligence python
module
ID: CCTRI-162-9e48dd45-c211-4d0e-b909-c28badb790ac
Steps:
1. Send POST request to create new campaign entity using custom python
module to provide source data for feedback entity
2. Send GET request using custom python module to read just created
feedback entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
Expected results: Feedback entity can be created, fetched and deleted using
custom python module. Data stored in the entity is the same whether
you access it directly or through our tool
Importance: Critical
"""
feedback = get_entity('feedback')
# Create new campaign entity to be used for feedback
post_tool_response = get_entity_response('campaign', CAMPAIGN_PAYLOAD)
campaign_entity_id = post_tool_response['id']
# Create new feedback entity using provided payload with already formed
# campaign entity
post_tool_response = get_entity_response(
'feedback', FEEDBACK_PAYLOAD, dict(entity_id=campaign_entity_id))
values = {
key: post_tool_response[key] for key in [
'feedback',
'reason',
'entity_id',
'type',
'schema_version'
]
}
assert values == FEEDBACK_PAYLOAD
feedback_entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns same data for direct access and access
# through custom python module
get_tool_response = feedback.get(feedback_entity_id)
get_direct_response = ctia_get_data(
target_url=FEEDBACK,
entity_id=feedback_entity_id,
headers=module_headers
).json()
assert get_tool_response == get_direct_response
def test_python_module_ctia_positive_graphql(module_tool_client):
"""Perform testing for graphql entity of custom threat intelligence python
module
ID: CCTRI-162-eed3e3ae-39b3-4c38-ae60-c22c412b2d15
Steps:
1. Send POST request to server to execute GraphQL query using custom
python module
Expected results: POST request is sent successfully and a valid response
is received from the server
Importance: Critical
"""
query = (
'query Sightings($query: String, $first: Int) {sightings(query:'
' $query, first: $first, orderBy: [{field: OBSERVED_TIME_START_TIME,'
' direction: desc}]) {nodes {observables {value type} confidence'
' severity description resolution source source_uri observed_time'
' {start_time end_time} relations {relation source {value type}'
' related {value type}}}}}'
)
payload = {
'query': query,
'variables': {'query': 'tags:"ransomware"', 'first': 100}
}
# Execute GraphQL query using provided payload
post_tool_response = module_tool_client.private_intel.graphql.post(
payload=payload, params={'wait_for': 'true'})
assert post_tool_response
def test_python_module_ctia_positive_identity_assertion(
module_headers, get_entity, get_entity_response):
"""Perform testing for identity assertion entity of custom threat
intelligence python module
ID: CCTRI-906-3fed238c-cd4c-45b5-a4c9-06c9ac29eb9a
Steps:
1. Send POST request to create new identity assertion entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
5. Update identity assertion entity using custom python module
6. Repeat GET request using python module and validate that entity was
updated
Expected results: Identity assertion entity can be created, fetched,
updated and deleted using custom python module. Data stored in the
entity is the same whether you access it directly or through our tool
Importance: Critical
"""
identity_assertion = get_entity('identity_assertion')
# Create new entity using provided payload
post_tool_response = get_entity_response(
'identity_assertion', IDENTITY_ASSERTION_PAYLOAD)
values = {
key: post_tool_response[key] for key in [
'identity',
'assertions',
'schema_version',
'source',
'type',
'external_ids'
]
}
assert values == IDENTITY_ASSERTION_PAYLOAD
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns same data for direct access and access
# through custom python module
get_tool_response = identity_assertion.get(entity_id)
get_direct_response = ctia_get_data(
target_url=IDENTITY_ASSERTION,
entity_id=entity_id,
headers=module_headers
).json()
assert get_tool_response == get_direct_response
# Validate that GET request by external_id returns matching entities
external_id_result = identity_assertion.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Update entity values
put_tool_response = delayed_return(
identity_assertion.put(
id_=entity_id,
payload=PUT_IDENTITY_ASSERTION_PAYLOAD
)
)
assert put_tool_response['assertions'][0]['value'] == 'Low'
get_tool_response = identity_assertion.get(entity_id)
assert get_tool_response['assertions'][0]['value'] == 'Low'
def test_python_module_ctia_positive_identity_assertion_search(get_entity):
"""Perform testing for identity_assertion/search entity of custom threat
intelligence python module
ID: CCTRI-2885 - d83079e8-28b0-4657-9325-c37e16dd040d
Steps:
1. Send POST request to create new identity_assertion entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
7. Compare the number of entities after creating and deleting entities
Expected results: identity_assertion entity can be created, fetched,
counted and deleted using custom python module. Data stored in the entity
is the same whether you access it directly or through our tool
Importance: Critical
"""
identity_assertion = get_entity('identity_assertion')
# Create new entity using provided payload
post_tool_response = identity_assertion.post(
payload=IDENTITY_ASSERTION_PAYLOAD, params={'wait_for': 'true'})
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that search GET request returns data of the created entity
get_identity_assertion_search = identity_assertion.search.get(
params={'id': entity_id})
assert get_identity_assertion_search[0]['type'] == 'identity-assertion'
assert get_identity_assertion_search[0]['schema_version'] == '1.1.3'
# Count entities after entity created
count_identity_assertion_before_deleted = identity_assertion.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(identity_assertion.search.delete(
params={'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert identity_assertion.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_identity_assertion_after_deleted = identity_assertion.search.count()
# Compare results of count_identity_assertion_before_deleted
# and count_identity_assertion_after_deleted
assert count_identity_assertion_before_deleted !=\
count_identity_assertion_after_deleted
def test_python_module_ctia_positive_identity_assertion_metric(
get_entity, get_entity_response):
"""Perform testing for identity_assertion/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2885 - e8d1f79e-d4f0-4834-9d3c-11f5eb6fabfe
Steps:
1. Send POST request to create new identity_assertion entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Send GET request to get type of metric/histogram endpoint
4. Send GET request to get type of metric/topn endpoint
5. Send GET request to get type of metric/cardinality endpoint
6. Delete created entity
7. Repeat GET request using python module and validate that entity was
deleted
Expected results: identity_assertion entity can be created, fetched,
queried via the metric endpoints and deleted using custom python module.
Data stored in the entity is the same whether you access it
directly or through our tool.
Importance: Critical
"""
identity_assertion = get_entity('identity_assertion')
# Create new entity using provided payload
assertion_post_tool_response = get_entity_response(
'identity_assertion', IDENTITY_ASSERTION_PAYLOAD)
entity_id = assertion_post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns data of the created entity
get_created_identity_assertion = identity_assertion.get(entity_id)
assert get_created_identity_assertion['type'] == 'identity-assertion'
assert get_created_identity_assertion['schema_version'] == '1.1.3'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_identity_assertion['timestamp']
metric_histogram = identity_assertion.metric.histogram(params={
'granularity': 'week', 'from': data_from, 'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = identity_assertion.metric.topn(params={
'from': data_from, 'aggregate-on': 'identity.observables.type'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = identity_assertion.metric.cardinality(params={
'from': data_from, 'aggregate-on': 'identity.observables.type'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_incident(
module_headers, get_entity, get_entity_response):
"""Perform testing for incident entity of custom threat intelligence python
module
ID: CCTRI-163-e633504e-0b62-4c28-a86f-a43b5bcd53b0
Steps:
1. Send POST request to create new incident entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
5. Validate that GET request by external_id returns matching
entities
6. Update incident entity using custom python module
7. Repeat GET request using python module and validate that entity was
updated
8. Send PATCH request to update entity partially
9. Repeat GET request to validate that entity was updated
10. Update incident status using special endpoint for that purpose
11. Repeat GET request to validate that status was updated
Expected results: Incident entity can be created, fetched, updated and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or through our tool
Importance: Critical
"""
incident = get_entity('incident')
# Create new entity using provided payload
incident_post_tool_response = get_entity_response(
'incident', INCIDENT_PAYLOAD)
values = {
key: incident_post_tool_response[key] for key in [
'confidence',
'incident_time',
'status',
'type',
'schema_version',
'external_ids'
]
}
assert values == INCIDENT_PAYLOAD
entity_id = incident_post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns same data for direct access and access
# through custom python module
get_tool_response = incident.get(entity_id)
get_direct_response = ctia_get_data(
target_url=INCIDENT,
entity_id=entity_id,
headers=module_headers
).json()
assert get_tool_response == get_direct_response
# Validate that GET request by external_id returns matching entities
external_id_result = incident.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Update entity values
put_tool_response = delayed_return(
incident.put(id_=entity_id, payload=PUT_INCIDENT_PAYLOAD))
assert put_tool_response['confidence'] == 'Medium'
get_tool_response = incident.get(entity_id)
assert get_tool_response['confidence'] == 'Medium'
# Validate PATCH request
patch_tool_response = delayed_return(
incident.patch(id_=entity_id, payload={'confidence': 'Low'}))
assert patch_tool_response['confidence'] == 'Low'
get_tool_response = incident.get(entity_id)
assert get_tool_response['confidence'] == 'Low'
# Validate status endpoint
assert get_tool_response['status'] == 'Open'
delayed_return(incident.status(entity_id, {'status': 'New'}))
get_tool_response = incident.get(entity_id)
assert get_tool_response['status'] == 'New'
def test_python_module_ctia_positive_incident_search(get_entity):
"""Perform testing for incident/search entity of custom threat
intelligence python module
ID: CCTRI-2848 - 8fc6ba46-a610-4432-a72b-af92836fa560
Steps:
1. Send POST request to create new incident entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
7. Compare the number of entities after creating and deleting entities
Expected results: Incident entity can be created, fetched, counted and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or through our tool
Importance: Critical
"""
incident = get_entity('incident')
# Create new entity using provided payload
post_tool_response = incident.post(payload=INCIDENT_PAYLOAD,
params={'wait_for': 'true'})
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that search GET request returns data of the created entity
get_incident_search = incident.search.get(
params={'id': entity_id})
assert get_incident_search[0]['type'] == 'incident'
assert get_incident_search[0]['schema_version'] == '1.1.3'
# Count entities after entity created
count_incident_before_deleted = incident.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(incident.search.delete(
params={'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert incident.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_incident_after_deleted = incident.search.count()
# Compare results of count_incident_before_deleted
# and count_incident_after_deleted
assert count_incident_before_deleted != count_incident_after_deleted
def test_python_module_ctia_positive_incident_metric(
get_entity, get_entity_response):
"""Perform testing for incident/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2848 - 1828964e-ebee-4ed5-939f-f44e8010e0eb
Steps:
1. Send POST request to create new incident entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Send GET request to get type of metric/histogram endpoint
4. Send GET request to get type of metric/topn endpoint
5. Send GET request to get type of metric/cardinality endpoint
6. Delete created entity
7. Repeat GET request using python module and validate that entity was
deleted
Expected results: Incident entity can be created, fetched,
queried via the metric endpoints and deleted using custom python module.
Data stored in the entity is the same whether you access it
directly or through our tool.
Importance: Critical
"""
incident = get_entity('incident')
# Create new entity using provided payload
incident_post_tool_response = get_entity_response(
'incident', INCIDENT_PAYLOAD)
entity_id = incident_post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns data of the created entity
get_created_incident = incident.get(entity_id)
assert get_created_incident['type'] == 'incident'
assert get_created_incident['schema_version'] == '1.1.3'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_incident['timestamp']
metric_histogram = incident.metric.histogram(params={
'granularity': 'week', 'from': data_from, 'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = incident.metric.topn(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = incident.metric.cardinality(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_sightings_incident(
module_headers, get_entity, get_entity_response):
Perform testing for sightings/incidents functionality of custom threat
intelligence python module
ID: CCTRI-2968 - aa6ada6a-3fea-4743-bb46-85ebb38b1c6c
Steps:
1. Send POST request to create new sighting entity using custom python
module
2. Send POST request to create new incident entity using custom python
module
3. Send POST request to create new relationship entity using custom
python module
4. Send GET request to get data
Expected results: Incident and sighting entities can be created and
linked by a relationship using custom python module.
Data stored in the entity is the same whether you access it directly
or through our tool
Importance: Critical
"""
# Create new sighting entity using provided payload
sighting_post_tool_response = get_entity_response(
'sighting', SIGHTING_PAYLOAD)
values = {
key: sighting_post_tool_response[key] for key in [
'count',
'observed_time',
'confidence',
'type',
'schema_version',
'external_ids',
'observables'
]
}
assert values == SIGHTING_PAYLOAD
# Create new incident entity using provided payload
incident = get_entity('incident')
incident_post_tool_response = get_entity_response(
'incident', INCIDENT_PAYLOAD)
values = {
key: incident_post_tool_response[key] for key in [
'confidence',
'incident_time',
'status',
'type',
'schema_version',
'external_ids'
]
}
assert values == INCIDENT_PAYLOAD
# Create new relationship entity using provided payload
relationship_post_tool_response = get_entity_response(
'relationship', RELATIONSHIP_PAYLOAD,
dict(source_ref=sighting_post_tool_response['id'],
target_ref=incident_post_tool_response['id']))
assert relationship_post_tool_response['type'] == 'relationship'
assert relationship_post_tool_response['description'] == 'Test relation'
# Validate that sightings/incidents GET request returns the incident id
observable_type = sighting_post_tool_response['observables'][0]['type']
observable_value = sighting_post_tool_response['observables'][0]['value']
sightings_incidents_response = incident.sightings.incidents(
observable_type=observable_type, observable_value=observable_value)
assert sightings_incidents_response[0] == incident_post_tool_response['id']
def test_python_module_ctia_positive_incident_link(
module_headers, module_tool_client, get_entity):
Perform testing for incident link functionality of custom threat
intelligence python module
ID: CCTRI-2968-24862487-a750-487f-8d58-c86737aa0d75
Steps:
1. Send POST request to create new casebook entity using custom python
module
2. Send POST request to create new incident entity using custom python
module
3. Send POST request to link the casebook to the incident using custom
python module
4. Delete the incident entity and make attempt to get it back to
validate it is not there anymore
5. Delete casebook entity and make attempt to get it back to validate
it is not there anymore
6. Delete the relationship entity and make attempt to get it back
to validate it is not there anymore
Expected results: Incident entity can be created,
deleted using custom python module. Data stored in the entity is
the same no matter you access it directly or using our tool
Importance: Critical
"""
casebook = get_entity('casebook')
incident = get_entity('incident')
# Create casebook entity using provided payload
casebook_post_tool_response = casebook.post(
payload=CASEBOOK_PAYLOAD, params={'wait_for': 'true'})
casebook_id = casebook_post_tool_response['id']
# Add one observable to casebook using special endpoint for this purpose
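# (delayed_return is a helper used around write operations, presumably to
# absorb the service's indexing delay before the result is read back.)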
delayed_return(
casebook.observables(
casebook_id,
{
'operation': 'add',
'observables': casebook_post_tool_response['observables']
}
)
)
get_tool_response_casebook = casebook.get(casebook_id)
assert get_tool_response_casebook['observables'][0] ==\
casebook_post_tool_response['observables'][0]
# Create incident entity using provided payload
incident_post_tool_response = incident.post(
payload=INCIDENT_PAYLOAD, params={'wait_for': 'true'})
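# The 'id' field appears to be a full URI; keep only the trailing short id,
# which is what the endpoint-specific calls below expect.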
incident_id = incident_post_tool_response['id'].rpartition('/')[-1]
# Send POST request to link the incident to the casebook
link_payload = {
"casebook_id": casebook_id,
"tlp": "white"
}
link_request = incident.link(incident_id, payload=link_payload)
assert link_request['type'] == 'relationship'
assert link_request['schema_version'] == '1.1.3'
relationships_id = link_request['id']
# Delete the incident entity and make attempt to get it back to validate
# it is not there anymore
delayed_return(incident.delete(incident_id))
with pytest.raises(HTTPError):
incident.get(incident_id)
# Delete casebook entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(casebook.delete(casebook_id))
with pytest.raises(HTTPError):
casebook.get(casebook_id)
# Delete the relationship entity and make attempt to get it back to
# validate it is not there anymore
relationship = module_tool_client.private_intel.relationship
delayed_return(relationship.delete(relationships_id,
params={'wait_for': 'true'}))
with pytest.raises(HTTPError):
relationship.get(relationships_id)
def test_python_module_ctia_positive_indicator(
module_headers, get_entity, get_entity_response):
"""Perform testing for indicator entity of custom threat intelligence
python module
ID: CCTRI-163-f73c4512-9faa-462f-929f-c7ae3f79f887
Steps:
1. Send POST request to create new indicator entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
5. Validate that GET request by external_id returns the expected
external_id
6. Update indicator entity using custom python module
7. Repeat GET request using python module and validate that entity was
updated
Expected results: Indicator entity can be created, fetched, updated and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
indicator = get_entity('indicator')
# Create new entity using provided payload
post_tool_response = get_entity_response('indicator', INDICATOR_PAYLOAD)
values = {
key: post_tool_response[key] for key in [
'producer',
'revision',
'type',
'schema_version',
'external_ids'
]
}
assert values == INDICATOR_PAYLOAD
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns the same data for direct access and
# access through custom python module
get_tool_response = indicator.get(entity_id)
get_direct_response = ctia_get_data(
target_url=INDICATOR,
entity_id=entity_id,
**{'headers': module_headers}
).json()
assert get_tool_response == get_direct_response
# Validate that GET request by external_id returns the expected external_id
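# (external_id(3) appears to look entities up by the value '3' that the
# payload fixtures store in their external_ids list.)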
external_id_result = indicator.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Update entity values
put_tool_response = delayed_return(
indicator.put(
id_=entity_id,
payload={
'revision': 1,
'producer': 'producer',
}
)
)
assert put_tool_response['revision'] == 1
get_tool_response = indicator.get(entity_id)
assert get_tool_response['revision'] == 1
def test_python_module_ctia_positive_indicator_search(get_entity):
"""Perform testing for indicator/search entity of custom threat
intelligence python module
ID: CCTRI-2848 - 6137f999-74e9-456e-bea8-42f26341de43
Steps:
1. Send POST request to create new indicator entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
7. Compare the amount of entities after creating and deleting entities
Expected results: Indicator entity can be created, fetched, counted and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
indicator = get_entity('indicator')
# Create new entity using provided payload
post_tool_response = indicator.post(payload=INDICATOR_PAYLOAD,
params={'wait_for': 'true'})
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that search GET request returns just created entity
get_indicator_search = indicator.search.get(
params={'id': entity_id})
assert get_indicator_search[0]['type'] == 'indicator'
assert get_indicator_search[0]['schema_version'] == '1.1.3'
# Count entities after entity created
count_indicator_before_deleted = indicator.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
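# The REALLY_DELETE_ALL_THESE_ENTITIES flag appears to be an explicit safety
# confirmation required by the search (delete-by-query) endpoint.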
delayed_return(indicator.search.delete(params={
'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert indicator.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_indicator_after_deleted = indicator.search.count()
# Compare results of count_indicator_before_deleted
# and count_indicator_after_deleted
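# Only inequality is asserted, rather than an exact difference of one,
# presumably to tolerate concurrent activity on a shared test instance.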
assert count_indicator_before_deleted != count_indicator_after_deleted
def test_python_module_ctia_positive_indicator_metric(
get_entity, get_entity_response):
"""Perform testing for indicator/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2848 - 36009d09-8efc-412d-8003-33fb148ba8bf
Steps:
1. Send POST request to create new indicator entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Send GET request to get type of metric/histogram endpoint
4. Send GET request to get type of metric/topn endpoint
5. Send GET request to get type of metric/cardinality endpoint
Expected results: Indicator entity can be created, fetched,
queried via the metric endpoints and deleted using custom python module.
Data stored in the entity is the same whether you access it
directly or using our tool.
Importance: Critical
"""
indicator = get_entity('indicator')
# Create new entity
post_tool_response = get_entity_response('indicator', INDICATOR_PAYLOAD)
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Read just created entity back and validate its basic fields
get_created_indicator = indicator.get(entity_id)
assert get_created_indicator['type'] == 'indicator'
assert get_created_indicator['schema_version'] == '1.1.3'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_indicator['timestamp']
metric_histogram = indicator.metric.histogram(params={
'granularity': 'week', 'from': data_from, 'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = indicator.metric.topn(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = indicator.metric.cardinality(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_judgements_indicator(
module_headers, get_entity, get_entity_response):
"""Perform testing for indicator entity of custom threat intelligence
python module
ID: CCTRI-2968 - 2ff5e78f-d8f5-4405-a418-32ea166cc907
Steps:
1. Send POST request to create new judgement entity using custom python
module
2. Send POST request to create new indicator entity using custom python
module
3. Send POST request to create new relationship entity using custom
python module
4. Send GET request to get data
Expected results: Indicator and judgement entities can be created and added
into relationship using custom python module.
Data stored in the entity is the same whether you access it directly
or using our tool
Importance: Critical
"""
# Create new judgement entity using provided payload
judgement_post_response = get_entity_response(
'judgement', JUDGEMENT_PAYLOAD)
# Create new indicator using provided payload
indicator = get_entity('indicator')
indicator_post_response = get_entity_response(
'indicator', INDICATOR_PAYLOAD)
# Use created entities for relationship
# Create new relationship entity using provided payload
relationship_post_tool_response = get_entity_response(
'relationship', RELATIONSHIP_PAYLOAD,
dict(source_ref=judgement_post_response['id'],
target_ref=indicator_post_response['id']))
assert relationship_post_tool_response['description'] == 'Test relation'
assert relationship_post_tool_response['type'] == 'relationship'
# Validate that GET judgements indicator request returns data
observable_type = judgement_post_response['observable']['type']
observable_value = judgement_post_response['observable']['value']
judgement_indicator_response = indicator.judgements.indicators(
observable_type=observable_type,
observable_value=observable_value)
assert judgement_indicator_response[0] == indicator_post_response['id']
def test_python_module_ctia_positive_sightings_indicator(
module_headers, get_entity, get_entity_response):
"""Perform testing for indicator entity of custom threat intelligence
python module
ID: CCTRI-2968-070cfd62-f15f-4bfe-8d36-2b7c0aa5654a
Steps:
1. Send POST request to create new sighting entity using custom python
module
2. Send POST request to create new indicator entity using custom python
module
3. Send POST request to create new relationship entity using custom
python module
4. Send GET request to get data
Expected results: Indicator and sighting entities can be created and added
into relationship using custom python module.
Data stored in the entity is the same whether you access it directly
or using our tool
Importance: Critical
"""
sighting_post_tool_response = get_entity_response(
'sighting', SIGHTING_PAYLOAD)
values = {
key: sighting_post_tool_response[key] for key in [
'count',
'observed_time',
'confidence',
'type',
'schema_version',
'external_ids',
'observables'
]
}
assert values == SIGHTING_PAYLOAD
indicator = get_entity('indicator')
indicator_post_tool_response = get_entity_response(
'indicator', INDICATOR_PAYLOAD)
# Use created entities for relationship
relationship_post_tool_response = get_entity_response(
'relationship', RELATIONSHIP_PAYLOAD,
dict(source_ref=sighting_post_tool_response['id'],
target_ref=indicator_post_tool_response['id']))
assert relationship_post_tool_response['description'] == 'Test relation'
assert relationship_post_tool_response['type'] == 'relationship'
# Validate that GET sightings indicator request returns data
observable_type = sighting_post_tool_response['observables'][0]['type']
observable_value = sighting_post_tool_response['observables'][0]['value']
sightings_indicator_response = indicator.sightings.indicators(
observable_type=observable_type, observable_value=observable_value)
assert sightings_indicator_response[0] ==\
indicator_post_tool_response['id']
def test_python_module_ctia_positive_investigation(
module_headers, get_entity, get_entity_response):
"""Perform testing for investigation entity of custom threat intelligence
python module
ID: CCTRI-167-90f58543-649d-442b-84ec-9a8f4de83d21
Steps:
1. Send POST request to create new investigation entity using custom
python module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
5. Validate that GET request by external_id returns the expected
external_id
6. Update investigation entity using custom python module
7. Repeat GET request using python module and validate that entity was
updated
8. Delete entity from the system
Expected results: Investigation entity can be created, fetched, updated and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
investigation = get_entity('investigation')
# Create new entity using provided payload
investigation_post_tool_response = get_entity_response(
'investigation', INVESTIGATION_PAYLOAD)
values = {
key: investigation_post_tool_response[key] for key in [
'title',
'description',
'source',
'type',
'schema_version',
'external_ids'
]
}
assert values == INVESTIGATION_PAYLOAD
entity_id = investigation_post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns the same data for direct access and
# access through custom python module
get_tool_response = investigation.get(entity_id)
get_direct_response = ctia_get_data(
target_url=INVESTIGATION,
entity_id=entity_id,
**{'headers': module_headers}
).json()
assert get_tool_response == get_direct_response
# Validate that GET request by external_id returns the expected external_id
external_id_result = investigation.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Update entity values
put_tool_response = delayed_return(
investigation.put(
id_=entity_id,
payload={'title': 'New demo investigation', 'source': 'a source'}
)
)
assert put_tool_response['title'] == 'New demo investigation'
get_tool_response = investigation.get(entity_id)
assert get_tool_response['title'] == 'New demo investigation'
def test_python_module_ctia_positive_investigation_search(get_entity):
"""Perform testing for investigation/search entity of custom threat
intelligence python module
ID: CCTRI-2848 - 7dae9799-2ae0-4a8c-81ae-99477bb4833a
Steps:
1. Send POST request to create new investigation entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
7. Compare the amount of entities after creating and deleting entities
Expected results: Investigation entity can be created, fetched, counted and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
investigation = get_entity('investigation')
# Create new entity using provided payload
investigation_post_tool_response = investigation.post(
payload=INVESTIGATION_PAYLOAD, params={'wait_for': 'true'})
entity_id = investigation_post_tool_response['id'].rpartition('/')[-1]
# Validate that search GET request returns just created entity
get_investigation_search = investigation.search.get(
params={'id': entity_id})
assert get_investigation_search[0]['type'] == 'investigation'
assert get_investigation_search[0]['schema_version'] == '1.1.3'
# Count entities after entity created
count_investigation_before_deleted = investigation.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(investigation.search.delete(params={
'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert investigation.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_investigation_after_deleted = investigation.search.count()
# Compare results of count_investigation_before_deleted
# and count_investigation_after_deleted
assert count_investigation_before_deleted !=\
count_investigation_after_deleted
def test_python_module_ctia_positive_investigation_metric(
get_entity, get_entity_response):
"""Perform testing for investigation/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2848 - b1148fab-b57e-409c-a6b4-2ce0bd229bf1
Steps:
1. Send POST request to create new investigation entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Send GET request to get type of metric/histogram endpoint
4. Send GET request to get type of metric/topn endpoint
5. Send GET request to get type of metric/cardinality endpoint
Expected results: Investigation entity can be created, fetched,
queried via the metric endpoints and deleted using custom python module.
Data stored in the entity is the same whether you access it
directly or using our tool.
Importance: Critical
"""
investigation = get_entity('investigation')
# Create new entity using provided payload
post_tool_response = get_entity_response(
'investigation', INVESTIGATION_PAYLOAD)
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Read just created entity back and validate its basic fields
get_created_investigation = investigation.get(entity_id)
assert get_created_investigation['type'] == 'investigation'
assert get_created_investigation['schema_version'] == '1.1.3'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_investigation['timestamp']
metric_histogram = investigation.metric.histogram(params={
'granularity': 'week', 'from': data_from, 'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = investigation.metric.topn(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = investigation.metric.cardinality(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_judgement(
module_headers, get_entity, get_entity_response):
"""Perform testing for judgement entity of custom threat intelligence
python module
ID: CCTRI-163-75d6960a-6bf3-40cd-965c-c53a81cb0ffd
Steps:
1. Send POST request to create new judgement entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
5. Validate that GET request by external_id returns the expected
external_id
6. Validate that GET judgements request returns observable and type of
created entity
7. Make an attempt to update judgement entity using custom python
module
8. Check that error is returned
9. Create expired judgement via /ctia/judgement/{id}/expire endpoint
10. Delete entity from the system
Expected results: Judgement entity can be created, fetched and deleted
using custom python module. Data stored in the entity is the same
whether you access it directly or using our tool
Importance: Critical
"""
judgement = get_entity('judgement')
# Create new entity using provided payload
post_tool_response = get_entity_response('judgement', JUDGEMENT_PAYLOAD)
values = {
key: post_tool_response[key] for key in [
'confidence',
'disposition',
'disposition_name',
'observable',
'priority',
'schema_version',
'severity',
'source',
'type',
'external_ids'
]
}
assert values == JUDGEMENT_PAYLOAD
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns the same data for direct access and
# access through custom python module
get_tool_response = judgement.get(entity_id)
get_direct_response = ctia_get_data(
target_url=JUDGEMENT,
entity_id=entity_id,
**{'headers': module_headers}
).json()
assert get_tool_response == get_direct_response
# Validate that GET request by external_id returns the expected external_id
external_id_result = judgement.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Validate that GET judgements request returns observable and type of
# created entity
observable_value = get_tool_response['observable']['value']
observable_type = get_tool_response['observable']['type']
get_observable_of_judgement = judgement.judgements(
observable_type=observable_type, observable_value=observable_value)
assert get_observable_of_judgement
assert get_observable_of_judgement[0]['observable']['value'] ==\
observable_value
assert get_observable_of_judgement[0]['observable']['type'] ==\
observable_type
# Make an attempt to update Judgement using endpoint which is not
# implemented in application
with pytest.raises(HTTPError) as context:
judgement.put(id_=entity_id, payload=PUT_JUDGEMENT_PAYLOAD)
assert '"error": "missing_capability"' in str(context.value)
# Create expired judgement
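# The expire endpoint withdraws the judgement; the reason is passed as a
# query parameter and echoed back in the response.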
expired_judgement = judgement.expire(entity_id, payload={},
params={'reason': 'For test'})
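# Note: the expected value below keeps the leading space that the service
# apparently prepends to the echoed reason.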
assert expired_judgement['reason'] == ' For test'
def test_python_module_ctia_positive_judgement_search(get_entity):
"""Perform testing for judgement/search entity of custom threat
intelligence python module
ID: CCTRI-2848 - 5f5b8907-9e76-4bbb-aa11-330721f569eb
Steps:
1. Send POST request to create new judgement entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
7. Compare the amount of entities after creating and deleting entities
Expected results: Judgement entity can be created, fetched, counted and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
judgement = get_entity('judgement')
# Create new entity using provided payload
post_tool_response = judgement.post(payload=JUDGEMENT_PAYLOAD,
params={'wait_for': 'true'})
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that search GET request returns just created entity
get_judgement_search = judgement.search.get(params={'id': entity_id})
assert get_judgement_search[0]['type'] == 'judgement'
# Count entities after entity created
count_judgement_before_deleted = judgement.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(judgement.search.delete(params={
'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert judgement.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_judgement_after_deleted = judgement.search.count()
# Compare results of count_judgement_before_deleted
# and count_judgement_after_deleted
assert count_judgement_before_deleted != count_judgement_after_deleted
def test_python_module_ctia_positive_judgement_metric(
get_entity, get_entity_response):
"""Perform testing for judgement/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2848 - 7bddcca2-0188-4885-9289-fa0797bf1448
Steps:
1. Send POST request to create new judgement entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send GET request to get type of metric/histogram endpoint
4. Send GET request to get type of metric/topn endpoint
5. Send GET request to get type of metric/cardinality endpoint
Expected results: Judgement entity can be created, fetched, queried via
the metric endpoints and deleted using custom python module.
Data stored in the entity is the same whether you access it
directly or using our tool.
Importance: Critical
"""
judgement = get_entity('judgement')
# Create new entity using provided payload
post_tool_response = get_entity_response('judgement', JUDGEMENT_PAYLOAD)
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Read just created entity back and validate its basic fields
get_created_judgement = judgement.get(entity_id)
assert get_created_judgement['type'] == 'judgement'
assert get_created_judgement['source'] == 'source'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_judgement['timestamp']
metric_histogram = judgement.metric.histogram(params={
'granularity': 'week', 'from': data_from, 'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = judgement.metric.topn(params={
'from': data_from, 'aggregate-on': 'confidence'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = judgement.metric.cardinality(
params={'from': data_from, 'aggregate-on': 'confidence'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_malware(
module_headers, get_entity, get_entity_response):
"""Perform testing for malware entity of custom threat intelligence python
module
ID: CCTRI-164-056ef37c-171d-4b1d-ae3d-4601aaa465bb
Steps:
1. Send POST request to create new malware entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
5. Validate that GET request by external_id returns the expected
external_id
6. Update malware entity using custom python module
7. Repeat GET request using python module and validate that entity was
updated
8. Delete entity from the system
Expected results: Malware entity can be created, fetched, updated and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
malware = get_entity('malware')
# Create new entity using provided payload
post_tool_response = get_entity_response('malware', MALWARE_PAYLOAD)
values = {
key: post_tool_response[key] for key in [
'title',
'labels',
'type',
'schema_version',
'description',
'short_description',
'external_ids'
]
}
assert values == MALWARE_PAYLOAD
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns the same data for direct access and
# access through custom python module
get_tool_response = malware.get(entity_id)
get_direct_response = ctia_get_data(
target_url=MALWARE,
entity_id=entity_id,
**{'headers': module_headers}
).json()
assert get_tool_response == get_direct_response
# Validate that GET request by external_id returns the expected external_id
external_id_result = malware.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Update entity values
put_tool_response = delayed_return(
malware.put(id_=entity_id, payload=PUT_MALWARE_PAYLOAD)
)
assert put_tool_response['title'] == 'Changed title for test'
get_tool_response = malware.get(entity_id)
assert get_tool_response['title'] == 'Changed title for test'
def test_python_module_ctia_positive_malware_search(get_entity):
"""Perform testing for malware/search entity of custom threat
intelligence python module
ID: CCTRI-2848 - 9f54a221-0e7b-4410-9737-84c61ab32dfe
Steps:
1. Send POST request to create new malware entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
7. Compare the amount of entities after creating and deleting entities
Expected results: Malware entity can be created, fetched, counted and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
malware = get_entity('malware')
# Create new entity using provided payload
post_tool_response = malware.post(payload=MALWARE_PAYLOAD,
params={'wait_for': 'true'})
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that search GET request returns just created entity
get_malware_search = malware.search.get(
params={'id': entity_id})
assert get_malware_search[0]['type'] == 'malware'
assert get_malware_search[0]['schema_version'] == '1.1.3'
# Count entities after entity created
count_malware_before_deleted = malware.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(malware.search.delete(params={
'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert malware.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_malware_after_deleted = malware.search.count()
# Compare results of count_malware_before_deleted
# and count_malware_after_deleted
assert count_malware_before_deleted != count_malware_after_deleted
def test_python_module_ctia_positive_malware_metric(
get_entity, get_entity_response):
"""Perform testing for malware/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2848 - 33b01f79-0d65-4aef-a1b0-c8f497400508
Steps:
1. Send POST request to create new malware entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Send GET request to get type of metric/histogram endpoint
4. Send GET request to get type of metric/topn endpoint
5. Send GET request to get type of metric/cardinality endpoint
Expected results: Malware entity can be created, fetched,
queried via the metric endpoints and deleted using custom python module.
Data stored in the entity is the same whether you access it
directly or using our tool.
Importance: Critical
"""
malware = get_entity('malware')
# Create new entity using provided payload
post_tool_response = get_entity_response('malware', MALWARE_PAYLOAD)
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Read just created entity back and validate its basic fields
get_created_malware = malware.get(entity_id)
assert get_created_malware['type'] == 'malware'
assert get_created_malware['schema_version'] == '1.1.3'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_malware['timestamp']
metric_histogram = malware.metric.histogram(params={
'granularity': 'week', 'from': data_from, 'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = malware.metric.topn(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = malware.metric.cardinality(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_relationship(
module_headers, get_entity, get_entity_response):
"""Perform testing for relationship entity of custom threat intelligence
python module
ID: CCTRI-164-f3c6e3c2-b437-4db9-a630-3c6072517ff2
Steps:
1. Send POST request to create one campaign entity to be used for
relationship functionality
2. Send POST request to create one indicator entity to be used for
relationship functionality
3. Send POST request to create new relationship entity using custom
python module
4. Send GET request using custom python module to read just created
entity back.
5. Send same GET request, but using direct access to the server
6. Validate that GET request by external_id returns the expected
external_id
7. Compare results
8. Update relationship entity using custom python module
9. Repeat GET request using python module and validate that entity was
updated
Expected results: Relationship entity can be created, fetched, updated and
deleted using custom python module. Data stored in the entity is the
same whether you access it directly or using our tool
Importance: Critical
"""
relationship = get_entity('relationship')
# Create new campaign using provided payload
campaign_post_tool_response =\
get_entity_response('campaign', CAMPAIGN_PAYLOAD)
# Create new indicator using provided payload
indicator_post_tool_response =\
get_entity_response('indicator', INDICATOR_PAYLOAD)
# Create new entity using provided payload
relationship_post_tool_response =\
get_entity_response('relationship', RELATIONSHIP_PAYLOAD,
dict(source_ref=campaign_post_tool_response['id'],
target_ref=indicator_post_tool_response['id'])
)
values = {
key: relationship_post_tool_response[key] for key in [
'description',
'source_ref',
'target_ref',
'relationship_type',
'type',
'schema_version',
'external_ids'
]
}
assert values == RELATIONSHIP_PAYLOAD
entity_id = relationship_post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns the same data for direct access and
# access through custom python module
get_tool_response = relationship.get(entity_id)
get_direct_response = ctia_get_data(
target_url=RELATIONSHIP,
entity_id=entity_id,
**{'headers': module_headers}
).json()
assert get_tool_response == get_direct_response
# Validate that GET request by external_id returns the expected external_id
external_id_result = relationship.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Update entity values
put_tool_response = delayed_return(
relationship.put(
id_=entity_id,
payload={
'description': 'New demo relation',
'source_ref': campaign_post_tool_response['id'],
'target_ref': indicator_post_tool_response['id'],
'relationship_type': 'indicates',
}
)
)
assert put_tool_response['description'] == 'New demo relation'
get_tool_response = relationship.get(entity_id)
assert get_tool_response['description'] == 'New demo relation'
def test_python_module_ctia_positive_relationship_search(
module_tool_client, get_entity, get_entity_response):
"""Perform testing for relationship/search entity of custom threat
intelligence python module
ID: CCTRI-2848 - 55dedd52-678a-4513-9b43-0bb88599d3f5
Steps:
1. Send POST request to create one campaign entity to be used for
relationship functionality
2. Send POST request to create one indicator entity to be used for
relationship functionality
3. Send POST request to create new relationship entity using custom
python module
4. Send GET request using custom python module to read just created
entity back.
5. Count entities after entity created
6. Delete entity from the system
7. Repeat GET request using python module and validate that entity was
deleted
8. Count entities after entity deleted
9. Compare the amount of entities after creating and deleting entities
Expected results: Relationship entity can be created, fetched, counted and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
# Create new campaign using provided payload
campaign_post_tool_response = get_entity_response(
'campaign', CAMPAIGN_PAYLOAD)
# Create new indicator using provided payload
indicator_post_tool_response = get_entity_response(
'indicator', INDICATOR_PAYLOAD)
# Use created entities for relationship
relationship = module_tool_client.private_intel.relationship
payload = {
'description': 'Test relation',
'schema_version': campaign_post_tool_response['schema_version'],
'type': 'relationship',
'source_ref': campaign_post_tool_response['id'],
'target_ref': indicator_post_tool_response['id'],
'relationship_type': 'indicates',
}
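# (The payload is built inline here, with source_ref and target_ref pointing
# at the entities created above rather than reusing RELATIONSHIP_PAYLOAD.)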
# Create new entity using provided payload
post_tool_response = relationship.post(payload=payload,
params={'wait_for': 'true'})
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that search GET request returns just created entity
get_relationship_search = relationship.search.get(
params={'id': entity_id})
assert get_relationship_search[0]['type'] == 'relationship'
assert get_relationship_search[0]['schema_version'] == '1.1.3'
assert get_relationship_search[0]['description'] == 'Test relation'
# Count entities after entity created
count_relationship_before_deleted = relationship.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(relationship.search.delete(params={
'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert relationship.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_relationship_after_deleted = relationship.search.count()
# Compare results of count_relationship_before_deleted
# and count_relationship_after_deleted
assert count_relationship_before_deleted !=\
count_relationship_after_deleted
def test_python_module_ctia_positive_relationship_metric(
get_entity, get_entity_response):
"""Perform testing for relationship/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2848 - 4d34bfc9-eec7-4c28-b53c-f6c83e46a9d1
Steps:
1. Send POST request to create one campaign entity to be used for
relationship functionality
2. Send POST request to create one indicator entity to be used for
relationship functionality
3. Send POST request to create new relationship entity using custom
python module
4. Send GET request using custom python module to read just created
entity back.
5. Send GET request to get type of metric/histogram endpoint
6. Send GET request to get type of metric/topn endpoint
7. Send GET request to get type of metric/cardinality endpoint
Expected results: Relationship entity can be created, fetched,
queried via the metric endpoints and deleted using custom python module.
Data stored in the entity is the same whether you access it
directly or using our tool.
Importance: Critical
"""
# Create new campaign using provided payload
campaign_post_tool_response = get_entity_response(
'campaign', CAMPAIGN_PAYLOAD)
# Create new indicator using provided payload
indicator_post_tool_response = get_entity_response(
'indicator', INDICATOR_PAYLOAD)
# Create new entity using provided payload
relationship = get_entity('relationship')
relationship_post_tool_response = get_entity_response(
'relationship', RELATIONSHIP_PAYLOAD,
dict(source_ref=campaign_post_tool_response['id'],
target_ref=indicator_post_tool_response['id']))
entity_id = relationship_post_tool_response['id'].rpartition('/')[-1]
# Read just created entity back and validate its basic fields
get_created_relationship = relationship.get(entity_id)
assert get_created_relationship['type'] == 'relationship'
assert get_created_relationship['schema_version'] == '1.1.3'
assert get_created_relationship['description'] == 'Test relation'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_relationship['timestamp']
metric_histogram = relationship.metric.histogram(params={
'granularity': 'week', 'from': data_from, 'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = relationship.metric.topn(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = relationship.metric.cardinality(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_sighting(
module_headers, get_entity, get_entity_response):
"""Perform testing for sighting entity of custom threat intelligence python
module
ID: CCTRI-165-6fe55f8c-a148-4d7c-8a27-fbbec825819f
Steps:
1. Send POST request to create new sighting entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
5. Validate that GET request by external_id returns the expected
external_id
6. Validate that GET sighting request returns observable and type of
created entity
7. Update sighting entity using custom python module
8. Repeat GET request using python module and validate that entity was
updated
9. Delete entity from the system
Expected results: Sighting entity can be created, fetched, updated and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
sighting = get_entity('sighting')
# Create new entity using provided payload
sighting_post_tool_response = get_entity_response(
'sighting', SIGHTING_PAYLOAD)
values = {
key: sighting_post_tool_response[key] for key in [
'count',
'observed_time',
'confidence',
'type',
'schema_version',
'external_ids',
'observables'
]
}
assert values == SIGHTING_PAYLOAD
entity_id = sighting_post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns the same data for direct access and
# access through custom python module
get_tool_response = sighting.get(entity_id)
get_direct_response = ctia_get_data(
target_url=SIGHTING,
entity_id=entity_id,
**{'headers': module_headers}
).json()
assert get_tool_response == get_direct_response
# Validate that GET request by external_id returns the expected external_id
external_id_result = sighting.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Validate that GET sighting request returns observable and type of
# created entity
get_observable_of_sighting = sighting.sightings(
observable_type='ip', observable_value='123.421.123.1')
assert get_observable_of_sighting[0]['observables'][0]['value'] == \
'123.421.123.1'
assert get_observable_of_sighting[0]['observables'][0]['type'] == 'ip'
# Update entity values
put_tool_response = delayed_return(
sighting.put(id_=entity_id, payload=PUT_SIGHTING_PAYLOAD)
)
assert put_tool_response['confidence'] == 'Low'
get_tool_response = sighting.get(entity_id)
assert get_tool_response['confidence'] == 'Low'
def test_python_module_ctia_positive_sighting_search(get_entity):
"""Perform testing for sighting/search entity of custom threat
intelligence python module
ID: CCTRI-2848 - cbe1ae9b-8889-45d0-ac14-a4ec71c7208a
Steps:
1. Send POST request to create new sighting entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
7. Compare the amount of entities after creating and deleting entities
Expected results: Sighting entity can be created, fetched, counted and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
sighting = get_entity('sighting')
# Create new entity using provided payload
post_sighting_response = sighting.post(
payload=SIGHTING_PAYLOAD, params={'wait_for': 'true'})
entity_id = post_sighting_response['id'].rpartition('/')[-1]
# Validate that search GET request returns just created entity
get_sighting_search = sighting.search.get(
params={'id': entity_id})
assert get_sighting_search[0]['type'] == 'sighting'
assert get_sighting_search[0]['schema_version'] == '1.1.3'
# Count entities after entity created
count_sighting_before_deleted = sighting.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(sighting.search.delete(params={
'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert sighting.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_sighting_after_deleted = sighting.search.count()
# Compare results of count_sighting_before_deleted
# and count_sighting_after_deleted
assert count_sighting_before_deleted != count_sighting_after_deleted
def test_python_module_ctia_positive_sighting_metric(
get_entity, get_entity_response):
"""Perform testing for sighting/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2848 - edbab647-5ba8-4756-be13-4ebe96d4c899
Steps:
1. Send POST request to create new sighting entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Send GET request to get type of metric/histogram endpoint
4. Send GET request to get type of metric/topn endpoint
5. Send GET request to get type of metric/cardinality endpoint
Expected results: Sighting entity can be created, fetched,
queried via the metric endpoints and deleted using custom python module.
Data stored in the entity is the same whether you access it
directly or using our tool.
Importance: Critical
"""
sighting = get_entity('sighting')
# Create new entity using provided payload
post_sighting_response = get_entity_response('sighting', SIGHTING_PAYLOAD)
entity_id = post_sighting_response['id'].rpartition('/')[-1]
# Read just created entity back and validate its basic fields
get_created_sighting = sighting.get(entity_id)
assert get_created_sighting['type'] == 'sighting'
assert get_created_sighting['schema_version'] == '1.1.3'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_sighting['timestamp']
metric_histogram = sighting.metric.histogram(params={
'granularity': 'week', 'from': data_from, 'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = sighting.metric.topn(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = sighting.metric.cardinality(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_target_record(
module_headers, get_entity, get_entity_response):
"""Perform testing for target_record entity of custom threat intelligence
python module
ID: CCTRI-2906 - 3392e79b-b8c7-4ff8-b261-a1032bc78cbd
Steps:
1. Send POST request to create new target_record entity using custom
python module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
5. Validate that GET request by external_id returns the expected
external_id
6. Update target_record entity using custom python module
7. Repeat GET request using python module and validate that entity was
updated
8. Delete entity from the system
Expected results: Target_record entity can be created, fetched, updated and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
target_record = get_entity('target_record')
# Create new entity using provided payload
post_tool_response = get_entity_response(
'target_record', TARGET_RECORD_PAYLOAD)
values = {
key: post_tool_response[key] for key in [
'source',
'targets',
'type',
'schema_version',
'external_ids'
]
}
assert values == TARGET_RECORD_PAYLOAD
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns the same data for direct access and
# access through custom python module
get_tool_response = target_record.get(entity_id)
get_direct_response = ctia_get_data(
target_url=TARGET_RECORD,
entity_id=entity_id,
**{'headers': module_headers}
).json()
assert get_tool_response == get_direct_response
# Validate that GET request of external_id returns appropriate value
external_id_result = target_record.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Update entity values
put_tool_response = delayed_return(
target_record.put(
id_=entity_id,
payload=PUT_TARGET_RECORD_PAYLOAD)
)
assert put_tool_response['source'] == 'Updated source'
get_tool_response = target_record.get(entity_id)
assert get_tool_response['source'] == 'Updated source'
def test_python_module_ctia_positive_target_record_search(get_entity):
"""Perform testing for target_record/search entity of custom threat
intelligence python module
ID: CCTRI-2906 - b1fd55c7-cbae-43c7-a246-725948563e96
Steps:
1. Send POST request to create new target_record entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
7. Compare the amount of entities after creating and deleting entities
Expected results: Target_record entity can be created, fetched, counted and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
target_record = get_entity('target_record')
# Create new entity using provided payload
post_tool_response = target_record.post(payload=TARGET_RECORD_PAYLOAD,
params={'wait_for': 'true'})
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that search GET request returns just created entity
get_target_record_search = target_record.search.get(
params={'id': entity_id})
assert get_target_record_search[0]['type'] == 'target-record'
assert get_target_record_search[0]['schema_version'] == '1.1.3'
# Count entities after entity created
count_target_record_before_deleted = target_record.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(target_record.search.delete(params={
'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert target_record.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_target_record_after_deleted = target_record.search.count()
# Compare results of count_target_record_before_deleted
# and count_target_record_after_deleted
assert count_target_record_before_deleted !=\
count_target_record_after_deleted
def test_python_module_ctia_positive_target_record_metric(
get_entity, get_entity_response):
"""Perform testing for target_record/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2906 - e3426742-294f-406a-9fb0-06958c369c3d
Steps:
1. Send POST request to create new target_record entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Send GET request to get type of metric/histogram endpoint
4. Send GET request to get type of metric/topn endpoint
5. Send GET request to get type of metric/cardinality endpoint
Expected results: Target_record entity can be created, fetched,
queried via the metric endpoints and deleted using custom python module.
Data stored in the entity is the same whether you access it
directly or using our tool.
Importance: Critical
"""
target_record = get_entity('target_record')
# Create new entity using provided payload
post_tool_response = get_entity_response(
'target_record', TARGET_RECORD_PAYLOAD)
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Read just created entity back and validate its basic fields
get_created_target_record = target_record.get(entity_id)
assert get_created_target_record['type'] == 'target-record'
assert get_created_target_record['schema_version'] == '1.1.3'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_target_record['timestamp']
metric_histogram = target_record.metric.histogram(params={
'granularity': 'week', 'from': data_from, 'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = target_record.metric.topn(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = target_record.metric.cardinality(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_status(get_entity):
"""Perform testing for status endpoint using custom threat intelligence
python module
ID: CCTRI-167-29cdff9c-0d48-4f73-acdb-b77795e3ad0f
Steps:
1. Send GET request to server using custom python module
2. Validate returned data
Expected results: Response contains information about server health status
Importance: Critical
"""
status = get_entity('status')
server_status = status.get()
assert server_status['status'] == 'ok'
def test_python_module_ctia_positive_tool(
module_headers, get_entity, get_entity_response):
"""Perform testing for tool entity of custom threat intelligence python
module
ID: CCTRI-166-ebdfccab-a751-43fe-974f-037da0b10153
Steps:
1. Send POST request to create new tool entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
5. Validate that GET request by external_id returns the expected
external_id
6. Update tool entity using custom python module
7. Repeat GET request using python module and validate that entity was
updated
8. Delete entity from the system
Expected results: Tool entity can be created, fetched, updated and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
tool = get_entity('tool')
post_tool_response = get_entity_response('tool', TOOL_PAYLOAD)
values = {
key: post_tool_response[key] for key in [
'labels',
'type',
'schema_version',
'description',
'title',
'short_description',
'external_ids'
]
}
assert values == TOOL_PAYLOAD
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request returns the same data for direct access and
# access through custom python module
get_tool_response = tool.get(entity_id)
get_direct_response = ctia_get_data(
target_url=TOOL,
entity_id=entity_id,
**{'headers': module_headers}
).json()
assert get_tool_response == get_direct_response
# Validate that GET request by external_id returns the expected external_id
external_id_result = tool.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Update entity values
put_tool_response = delayed_return(
tool.put(id_=entity_id, payload=PUT_TOOL_PAYLOAD)
)
assert put_tool_response['title'] == 'Changed title for test'
get_tool_response = tool.get(entity_id)
assert get_tool_response['title'] == 'Changed title for test'
def test_python_module_ctia_positive_tool_search(get_entity):
"""Perform testing for tool/search entity of custom threat
intelligence python module
ID: CCTRI-2848 - cbe1ae9b-8889-45d0-ac14-a4ec71c7208a
Steps:
1. Send POST request to create new tool entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
7. Compare the amount of entities after creating and deleting entities
Expected results: Tool entity can be created, fetched, counted and
deleted using custom python module. Data stored in the entity is
the same whether you access it directly or using our tool
Importance: Critical
"""
tool = get_entity('tool')
# Create new entity using provided payload
post_tool_response = tool.post(payload=TOOL_PAYLOAD,
params={'wait_for': 'true'})
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that search GET request returns just created entity
get_tool_search = tool.search.get(
params={'id': entity_id})
assert get_tool_search[0]['type'] == 'tool'
assert get_tool_search[0]['schema_version'] == '1.1.3'
# Count entities after entity created
count_tool_before_deleted = tool.search.count()
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(tool.search.delete(params={
'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert tool.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_tool_after_deleted = tool.search.count()
# Compare results of count_tool_before_deleted
# and count_tool_after_deleted
assert count_tool_before_deleted != count_tool_after_deleted
def test_python_module_ctia_positive_tool_metric(
get_entity, get_entity_response):
"""Perform testing for tool/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2848 -edbab647-5ba8-4756-be13-4ebe96d4c899
Steps:
1. Send POST request to create new tool entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Send GET request to get type of metric/histogram endpoint
4. Send GET request to get type of metric/topn endpoint
5. Send GET request to get type of metric/cardinality endpoint
Expected results: Tool entity can be created, fetched,
    queried via the metric endpoints, and deleted using custom python module.
    Data stored in the entity is the same whether you access it
    directly or through our tool.
Importance: Critical
"""
tool = get_entity('tool')
# Create new entity using provided payload
post_tool_response = get_entity_response('tool', TOOL_PAYLOAD)
entity_id = post_tool_response['id'].rpartition('/')[-1]
    # Validate that the created entity can be read back through the custom
    # python module
get_created_tool = tool.get(entity_id)
assert get_created_tool['type'] == 'tool'
assert get_created_tool['schema_version'] == '1.1.3'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_tool['timestamp']
metric_histogram = tool.metric.histogram(params={
'granularity': 'week', 'from': data_from, 'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = tool.metric.topn(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = tool.metric.cardinality(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_verdict(
module_headers, get_entity, get_entity_response):
"""Perform testing for verdict entity of custom threat intelligence python
module
ID: CCTRI-166-ebdfccab-a751-43fe-974f-037da0b10153
Steps:
1. Send POST request to create new judgement entity using custom python
module to provide source data for verdict entity
2. Send GET request using custom python module to read verdict entity
based on just created one.
3. Send same GET request, but using direct access to the server
4. Compare results
Expected results: Verdict entity can be fetched using custom python module.
    Data stored in the entity is the same whether you access it directly
    or through our tool
Importance: Critical
"""
# Create new judgement entity to be used for verdict
judgement_post_tool_response = get_entity_response(
'judgement', JUDGEMENT_PAYLOAD)
observable_type = judgement_post_tool_response['observable']['type']
observable_value = judgement_post_tool_response['observable']['value']
    # Validate that GET requests return the same data for direct access and
    # access through custom python module
verdict = get_entity('verdict')
verdict_get_tool_response = verdict.get(observable_type, observable_value)
assert verdict_get_tool_response['type'] == 'verdict'
get_direct_response = ctia_get_data(
target_url=VERDICT.format(observable_type, observable_value),
**{'headers': module_headers}
).json()
assert verdict_get_tool_response == get_direct_response
def test_python_module_ctia_positive_version(get_entity):
"""Perform testing for version endpoint using custom threat intelligence
python module
ID: CCTRI-167-0d9be838-5aad-4f81-99bd-ead69a9c2d08
Steps:
1. Send GET request to server using custom python module
2. Validate returned data
Expected results: Response contains information about server version
Importance: Critical
"""
version = get_entity('version')
server_version = version.get()
assert server_version['base'] == '/ctia'
assert server_version['ctim-version'] == '1.1.3'
def test_python_module_ctia_positive_vulnerability(
module_headers, get_entity, get_entity_response):
"""Perform testing for vulnerability entity of custom threat intelligence
python module
ID: CCTRI-168-4a43be85-6d16-46db-b54f-6b05e9b68ab2
Steps:
1. Send POST request to create new vulnerability entity using custom
python module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
    5. Validate that a GET request by external_id returns entities with
        the matching external_id
6. Update vulnerability entity using custom python module
7. Repeat GET request using python module and validate that entity was
updated
Expected results: Vulnerability entity can be created, fetched, updated and
deleted using custom python module. Data stored in the entity is
    the same whether you access it directly or through our tool
Importance: Critical
"""
vulnerability = get_entity('vulnerability')
# Create new entity using provided payload
vulnerability_post_tool_response = get_entity_response(
'vulnerability', VULNERABILITY_PAYLOAD)
values = {
key: vulnerability_post_tool_response[key] for key in [
'description',
'type',
'schema_version',
'external_ids'
]
}
assert values == VULNERABILITY_PAYLOAD
entity_id = vulnerability_post_tool_response['id'].rpartition('/')[-1]
    # Validate that GET requests return the same data for direct access and
    # access through custom python module
get_tool_response = vulnerability.get(entity_id)
get_direct_response = ctia_get_data(
target_url=VULNERABILITY,
entity_id=entity_id,
**{'headers': module_headers}
).json()
assert get_tool_response == get_direct_response
    # Validate that a GET request by external_id returns matching entities
external_id_result = vulnerability.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
    # Validate that a GET request to the cpe_match_strings endpoint returns
    # the expected result
    get_cpe_match_strings_data = vulnerability.cpe_match_strings(
params={'cpe23_match_strings': 'cpe:2.3:a:google:chrome:8.0:'
'beta:*:*:*:*:*:*'})
assert get_cpe_match_strings_data == []
# Update entity values
put_tool_response = delayed_return(
vulnerability.put(
id_=entity_id,
payload={'description': 'New browser vulnerability'}
)
)
assert put_tool_response['description'] == 'New browser vulnerability'
get_tool_response = vulnerability.get(entity_id)
assert get_tool_response['description'] == 'New browser vulnerability'
def test_python_module_ctia_positive_vulnerability_search(get_entity):
"""Perform testing for vulnerability/search entity of custom threat
intelligence python module
ID: CCTRI-2848 - 642bcca5-3eec-4955-b395-e4c365b65bf5
Steps:
1. Send POST request to create new vulnerability entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
    7. Compare the number of entities before and after the deletion
Expected results: Vulnerability entity can be created, fetched, counted and
deleted using custom python module. Data stored in the entity is
    the same whether you access it directly or through our tool
Importance: Critical
"""
vulnerability = get_entity('vulnerability')
# Create new entity using provided payload
vulnerability_post_tool_response = vulnerability.post(
payload=VULNERABILITY_PAYLOAD, params={'wait_for': 'true'})
entity_id = vulnerability_post_tool_response['id'].rpartition('/')[-1]
    # Validate that the search endpoint returns the entity that was just
    # created
get_vulnerability_search = vulnerability.search.get(
params={'id': entity_id})
assert get_vulnerability_search[0]['type'] == 'vulnerability'
assert get_vulnerability_search[0]['schema_version'] == '1.1.3'
# Count entities after entity created
count_vulnerability_before_deleted = vulnerability.search.count()
    # Delete the entity and attempt to get it back to validate that it is
    # no longer there
delayed_return(vulnerability.search.delete(params={
'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert vulnerability.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_vulnerability_after_deleted = vulnerability.search.count()
# Compare results of count_vulnerability_before_deleted
# and count_vulnerability_after_deleted
assert count_vulnerability_before_deleted !=\
count_vulnerability_after_deleted
def test_python_module_ctia_positive_vulnerability_metric(
get_entity, get_entity_response):
"""Perform testing for vulnerability/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2848 -1b6c327c-cf55-4e22-a72c-93f9ad4b2763
Steps:
1. Send POST request to create new vulnerability entity using
custom python module
2. Send GET request using custom python module to read just created
entity back.
3. Send GET request to get type of metric/histogram endpoint
4. Send GET request to get type of metric/topn endpoint
5. Send GET request to get type of metric/cardinality endpoint
Expected results: Vulnerability entity can be created, fetched,
    queried via the metric endpoints, and deleted using custom python module.
    Data stored in the entity is the same whether you access it
    directly or through our tool.
Importance: Critical
"""
vulnerability = get_entity('vulnerability')
vulnerability_post_tool_response = get_entity_response(
'vulnerability', VULNERABILITY_PAYLOAD)
entity_id = vulnerability_post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request return same data for direct access and access
# through custom python module
get_created_vulnerability = vulnerability.get(entity_id)
assert get_created_vulnerability['type'] == 'vulnerability'
assert get_created_vulnerability['schema_version'] == '1.1.3'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_vulnerability['timestamp']
metric_histogram = vulnerability.metric.histogram(params={
'granularity': 'week', 'from': data_from, 'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = vulnerability.metric.topn(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = vulnerability.metric.cardinality(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_cardinality['type'] == 'cardinality'
def test_python_module_ctia_positive_weakness(
module_headers, get_entity, get_entity_response):
"""Perform testing for weakness entity of custom threat intelligence python
module
ID: CCTRI-168-7de38006-e939-4a2a-b2d8-b752d3527182
Steps:
1. Send POST request to create new weakness entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
    5. Validate that a GET request by external_id returns entities with
        the matching external_id
6. Update weakness entity using custom python module
7. Repeat GET request using python module and validate that entity was
updated
Expected results: Weakness entity can be created, fetched, updated and
deleted using custom python module. Data stored in the entity is
    the same whether you access it directly or through our tool
Importance: Critical
"""
weakness = get_entity('weakness')
# Create new entity using provided payload
post_tool_response = get_entity_response('weakness', WEAKNESS_PAYLOAD)
values = {
key: post_tool_response[key] for key in [
'description',
'likelihood',
'type',
'schema_version',
'external_ids'
]
}
assert values == WEAKNESS_PAYLOAD
entity_id = post_tool_response['id'].rpartition('/')[-1]
    # Validate that GET requests return the same data for direct access and
    # access through custom python module
get_tool_response = weakness.get(entity_id)
get_direct_response = ctia_get_data(
target_url=WEAKNESS,
entity_id=entity_id,
**{'headers': module_headers}
).json()
assert get_tool_response == get_direct_response
    # Validate that a GET request by external_id returns matching entities
external_id_result = weakness.external_id(3)
assert external_id_result[0]['external_ids'] == ['3']
# Update entity values
put_tool_response = delayed_return(
weakness.put(
id_=entity_id,
payload={'likelihood': 'High', 'description': 'New description'}
)
)
assert put_tool_response['likelihood'] == 'High'
assert put_tool_response['description'] == 'New description'
get_tool_response = weakness.get(entity_id)
assert get_tool_response['likelihood'] == 'High'
assert get_tool_response['description'] == 'New description'
def test_python_module_ctia_positive_weakness_search(get_entity):
"""Perform testing for weakness/search entity of custom threat
intelligence python module
ID: CCTRI-2848 - a01b4f84-9661-4b67-ac94-cc5ce4ec3cb9
Steps:
1. Send POST request to create new weakness entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Count entities after entity created
4. Delete entity from the system
5. Repeat GET request using python module and validate that entity was
deleted
6. Count entities after entity deleted
    7. Compare the number of entities before and after the deletion
Expected results: Weakness entity can be created, fetched, counted and
deleted using custom python module. Data stored in the entity is
    the same whether you access it directly or through our tool
Importance: Critical
"""
weakness = get_entity('weakness')
# Create new entity using provided payload
post_tool_response = weakness.post(
payload=WEAKNESS_PAYLOAD, params={'wait_for': 'true'})
values = {
key: post_tool_response[key] for key in [
'description',
'likelihood',
'type',
'schema_version',
'external_ids'
]
}
assert values == WEAKNESS_PAYLOAD
    # Store the entity id for use as a parameter in later requests
entity_id = post_tool_response['id'].rpartition('/')[-1]
    # Validate that the search endpoint returns the entity that was just
    # created
get_weakness_search = weakness.search.get(params={'id': entity_id})
assert get_weakness_search[0]['type'] == 'weakness'
assert get_weakness_search[0]['schema_version'] == '1.1.3'
# Count entities after entity created
count_weakness_before_deleted = weakness.search.count()
    # Delete the entity and attempt to get it back to validate that it is
    # no longer there
delayed_return(weakness.search.delete(params={
'id': entity_id, 'REALLY_DELETE_ALL_THESE_ENTITIES': 'true'}))
# Repeat GET request and validate that entity was deleted
assert weakness.search.get(params={'id': entity_id}) == []
# Count entities after entity deleted
count_weakness_after_deleted = weakness.search.count()
# Compare results of count_weakness_before_deleted
# and count_weakness_after_deleted
assert count_weakness_before_deleted != count_weakness_after_deleted
def test_python_module_ctia_positive_weakness_metric(
get_entity, get_entity_response):
"""Perform testing for weakness/metric endpoints of custom threat
intelligence python module
ID: CCTRI-2848 -52c89f1b-9728-41d6-8a1f-07dd0ec8b976
Steps:
1. Send POST request to create new weakness entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send GET request to get type of metric/histogram endpoint
4. Send GET request to get type of metric/topn endpoint
5. Send GET request to get type of metric/cardinality endpoint
    Expected results: Weakness entity can be created, fetched, queried via
    the metric endpoints, and deleted using custom python module.
    Data stored in the entity is the same whether you access it
    directly or through our tool.
Importance: Critical
"""
weakness = get_entity('weakness')
# Create new entity using provided payload
post_tool_response = get_entity_response('weakness', WEAKNESS_PAYLOAD)
    # Store the entity id for use as a parameter in later requests
entity_id = post_tool_response['id'].rpartition('/')[-1]
    # Validate that the created entity can be read back through the custom
    # python module
get_created_weakness = weakness.get(entity_id)
assert get_created_weakness['type'] == 'weakness'
assert get_created_weakness['likelihood'] == 'Medium'
assert get_created_weakness['schema_version'] == '1.1.3'
# Send GET request to get type of metric/histogram endpoint
data_from = get_created_weakness['timestamp']
metric_histogram = weakness.metric.histogram(params={
'granularity': 'week', 'from': data_from, 'aggregate-on': 'timestamp'})
assert metric_histogram['type'] == 'histogram'
# Send GET request to get type of metric/topn endpoint
metric_topn = weakness.metric.topn(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_topn['type'] == 'topn'
# Send GET request to get type of metric/cardinality endpoint
metric_cardinality = weakness.metric.cardinality(params={
'from': data_from, 'aggregate-on': 'source'})
assert metric_cardinality['type'] == 'cardinality'
| 41.790952 | 79 | 0.692422 | 24,554 | 194,913 | 5.305164 | 0.030871 | 0.04864 | 0.045462 | 0.045907 | 0.871882 | 0.835571 | 0.794953 | 0.763003 | 0.730921 | 0.704682 | 0 | 0.01952 | 0.236454 | 194,913 | 4,663 | 80 | 41.799914 | 0.855757 | 0.458548 | 0 | 0.48834 | 0 | 0 | 0.143544 | 0.010814 | 0 | 0 | 0 | 0 | 0.192631 | 1 | 0.035914 | false | 0 | 0.004198 | 0 | 0.040112 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e7a102213500784b4361433374774ff24bbfd881 | 107 | py | Python | tests/support/tree/system/__init__.py | imhuwq/fabric | 7c105e3928ff46c2e10588d1d2c86f5a68d8ce1a | [
"BSD-2-Clause"
] | 802 | 2015-10-24T16:53:07.000Z | 2022-03-30T11:00:45.000Z | tests/support/tree/system/__init__.py | imhuwq/fabric | 7c105e3928ff46c2e10588d1d2c86f5a68d8ce1a | [
"BSD-2-Clause"
] | 47 | 2015-12-11T17:10:10.000Z | 2019-04-21T11:57:41.000Z | tests/support/tree/system/__init__.py | imhuwq/fabric | 7c105e3928ff46c2e10588d1d2c86f5a68d8ce1a | [
"BSD-2-Clause"
] | 94 | 2015-11-20T07:27:58.000Z | 2022-01-19T09:32:36.000Z | from fabric.api import task
from support.tree.system import debian
@task
def install_package():
pass
| 13.375 | 38 | 0.766355 | 16 | 107 | 5.0625 | 0.8125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.168224 | 107 | 7 | 39 | 15.285714 | 0.910112 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | true | 0.2 | 0.4 | 0 | 0.6 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
e7b98694da99cf27367951f8b0deb5856dbe5da0 | 53 | py | Python | sys_argv.py | tdcalvo/ei_swc_2017 | a09e11f016d5759fe3e18a28be7c5ab5a3fd43c4 | [
"MIT"
] | null | null | null | sys_argv.py | tdcalvo/ei_swc_2017 | a09e11f016d5759fe3e18a28be7c5ab5a3fd43c4 | [
"MIT"
] | null | null | null | sys_argv.py | tdcalvo/ei_swc_2017 | a09e11f016d5759fe3e18a28be7c5ab5a3fd43c4 | [
"MIT"
] | null | null | null | import sys
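# sys.argv[0] is the script name; command-line arguments follow in sys.argv[1:]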
print('sys.argv', sys.argv)
print('test')
| 13.25 | 27 | 0.698113 | 9 | 53 | 4.111111 | 0.555556 | 0.378378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.09434 | 53 | 3 | 28 | 17.666667 | 0.770833 | 0 | 0 | 0 | 0 | 0 | 0.226415 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0.666667 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
e7bc5bb2f5e84a77469ccd81f4d892c6c32c67e0 | 38 | py | Python | tests/core/tests.py | traceplusplus/traceplus-python | eb20bf8840fed4c789157cacf85eed6fa45a2f26 | [
"MIT"
] | null | null | null | tests/core/tests.py | traceplusplus/traceplus-python | eb20bf8840fed4c789157cacf85eed6fa45a2f26 | [
"MIT"
] | null | null | null | tests/core/tests.py | traceplusplus/traceplus-python | eb20bf8840fed4c789157cacf85eed6fa45a2f26 | [
"MIT"
] | null | null | null |
from traceplus.conf import Settings
| 9.5 | 35 | 0.815789 | 5 | 38 | 6.2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.157895 | 38 | 3 | 36 | 12.666667 | 0.96875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e7ce5afae728c399e5800b245c7bb904fafe8b69 | 122 | py | Python | c2vb/__init__.py | Gesrua/c2vb | 91d6413567de5b12d07538fd09114ae089d8623e | [
"MIT"
] | 2 | 2020-02-05T09:30:31.000Z | 2020-02-16T13:01:04.000Z | c2vb/__init__.py | Gesrua/c2vb | 91d6413567de5b12d07538fd09114ae089d8623e | [
"MIT"
] | null | null | null | c2vb/__init__.py | Gesrua/c2vb | 91d6413567de5b12d07538fd09114ae089d8623e | [
"MIT"
] | null | null | null | from .lexer import Lexer
from .parser import Parser
from .main import run
from .main import console
from .idt import proc
| 20.333333 | 26 | 0.795082 | 20 | 122 | 4.85 | 0.45 | 0.164948 | 0.28866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.163934 | 122 | 5 | 27 | 24.4 | 0.95098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e7dd4417b227e10a5042e079d68e810db1072443 | 31 | py | Python | docker/threatconnect-tcex/verify.py | ThisIsNotTheUserYouAreLookingFor/dockerfiles | f92673b0d15c457e4abe215cf260afbb5b25cf2e | [
"MIT"
] | 48 | 2018-12-12T12:18:09.000Z | 2022-03-05T02:23:42.000Z | docker/threatconnect-tcex/verify.py | ThisIsNotTheUserYouAreLookingFor/dockerfiles | f92673b0d15c457e4abe215cf260afbb5b25cf2e | [
"MIT"
] | 7,201 | 2018-12-24T17:14:17.000Z | 2022-03-31T13:39:12.000Z | docker/threatconnect-tcex/verify.py | ThisIsNotTheUserYouAreLookingFor/dockerfiles | f92673b0d15c457e4abe215cf260afbb5b25cf2e | [
"MIT"
] | 94 | 2018-12-17T10:59:21.000Z | 2022-03-29T12:59:30.000Z | import tcex
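# The import above is the real check: it fails if the tcex package is missing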
print("All good")
| 7.75 | 17 | 0.709677 | 5 | 31 | 4.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16129 | 31 | 3 | 18 | 10.333333 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0.258065 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
e7dd8bf76c3208d9e7a2c2d47dce61030eeecbf9 | 33 | py | Python | geojson_quirks/__init__.py | perrygeo/geojson-quirks | 1eded7ffba3529987ba3e4d1842c54bdf3035188 | [
"MIT"
] | 10 | 2016-02-08T23:39:14.000Z | 2020-10-29T21:19:13.000Z | geojson_quirks/__init__.py | perrygeo/geojson-quirks | 1eded7ffba3529987ba3e4d1842c54bdf3035188 | [
"MIT"
] | 4 | 2016-03-14T12:09:29.000Z | 2018-07-11T14:21:40.000Z | geojson_quirks/__init__.py | perrygeo/geojson-quirks | 1eded7ffba3529987ba3e4d1842c54bdf3035188 | [
"MIT"
] | 4 | 2017-04-12T01:23:05.000Z | 2021-04-17T16:30:45.000Z | from .tweak import tweak_feature
| 16.5 | 32 | 0.848485 | 5 | 33 | 5.4 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121212 | 33 | 1 | 33 | 33 | 0.931034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
99e4376e963aa8e60737ac02479af4f5f3761d88 | 180 | py | Python | exercicios/ex107/moeda.py | CarlosWillian/python | f863578245fbf402e5b46f844a247355afed0d62 | [
"MIT"
] | null | null | null | exercicios/ex107/moeda.py | CarlosWillian/python | f863578245fbf402e5b46f844a247355afed0d62 | [
"MIT"
] | null | null | null | exercicios/ex107/moeda.py | CarlosWillian/python | f863578245fbf402e5b46f844a247355afed0d62 | [
"MIT"
] | null | null | null | def aumentar(p, taxa):
    """Return price p increased by taxa percent."""
    return p * (taxa / 100 + 1)
def diminuir(p, taxa):
    """Return price p decreased by taxa percent."""
    return p * (1 - taxa / 100)
def dobro(p):
    """Return double the price p."""
    return p * 2
def metade(p):
    """Return half the price p."""
    return p / 2
| 11.25 | 31 | 0.538889 | 30 | 180 | 3.233333 | 0.366667 | 0.28866 | 0.226804 | 0.247423 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.081301 | 0.316667 | 180 | 15 | 32 | 12 | 0.707317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
82372d254eeda58d51db9aabd3268aa9b226f59b | 36 | py | Python | testscript.py | lavjams/BI-Demo | 2ff4aeb9dc71eeb1aa9e1f6510a79994c6c20ef1 | [
"MIT"
] | null | null | null | testscript.py | lavjams/BI-Demo | 2ff4aeb9dc71eeb1aa9e1f6510a79994c6c20ef1 | [
"MIT"
] | null | null | null | testscript.py | lavjams/BI-Demo | 2ff4aeb9dc71eeb1aa9e1f6510a79994c6c20ef1 | [
"MIT"
] | null | null | null |
print('Hello, Banneker Institute!') | 12 | 34 | 0.75 | 4 | 36 | 6.75 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.138889 | 36 | 3 | 34 | 12 | 0.870968 | 0 | 0 | 0 | 0 | 0 | 0.742857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 1 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 6
412dd1820def7372e0d52d26bb542c772abf58dc | 139 | py | Python | sample/views.py | noopurphalak/sample-django-package | 4249c5e0f110eecc798e08c54cd9ce7ef32daf19 | [
"MIT"
] | 1 | 2022-01-30T17:06:10.000Z | 2022-01-30T17:06:10.000Z | sample/views.py | noopurphalak/sample-django-package | 4249c5e0f110eecc798e08c54cd9ce7ef32daf19 | [
"MIT"
] | null | null | null | sample/views.py | noopurphalak/sample-django-package | 4249c5e0f110eecc798e08c54cd9ce7ef32daf19 | [
"MIT"
] | 1 | 2022-01-30T09:38:54.000Z | 2022-01-30T09:38:54.000Z | from django.http import JsonResponse
# Create your views here.
def hello(request):
return JsonResponse({"greeting": "Hello World"})
| 17.375 | 52 | 0.733813 | 17 | 139 | 6 | 0.882353 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.158273 | 139 | 7 | 53 | 19.857143 | 0.871795 | 0.165468 | 0 | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
68d4f31d81f3cc3c62bbbcac9d33bb2e6c9f5ea3 | 28 | py | Python | lib/scraper/__init__.py | jayrav13/presidency | f18721d5df9af161cc01f503b6657d9b06fea0e9 | [
"MIT"
] | 14 | 2016-11-05T03:43:26.000Z | 2021-03-25T14:55:19.000Z | lib/scraper/__init__.py | jayrav13/presidency | f18721d5df9af161cc01f503b6657d9b06fea0e9 | [
"MIT"
] | 5 | 2017-01-30T21:39:34.000Z | 2021-06-10T19:30:57.000Z | lib/scraper/__init__.py | jayrav13/presidency | f18721d5df9af161cc01f503b6657d9b06fea0e9 | [
"MIT"
] | 2 | 2016-11-22T08:36:07.000Z | 2017-01-28T16:36:29.000Z | from .scraper import Scraper | 28 | 28 | 0.857143 | 4 | 28 | 6 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107143 | 28 | 1 | 28 | 28 | 0.96 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
ec02a2669ce5cb816af40b40a55847149dd5469b | 45 | py | Python | 1004.py | heltonricardo/URI | 160cca22d94aa667177c9ebf2a1c9864c5e55b41 | [
"MIT"
] | 6 | 2021-04-13T00:33:43.000Z | 2022-02-10T10:23:59.000Z | 1004.py | heltonricardo/URI | 160cca22d94aa667177c9ebf2a1c9864c5e55b41 | [
"MIT"
] | null | null | null | 1004.py | heltonricardo/URI | 160cca22d94aa667177c9ebf2a1c9864c5e55b41 | [
"MIT"
] | 3 | 2021-03-23T18:42:24.000Z | 2022-02-10T10:24:07.000Z | print('PROD =', int(input()) * int(input()))
| 22.5 | 44 | 0.555556 | 6 | 45 | 4.166667 | 0.666667 | 0.64 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 45 | 1 | 45 | 45 | 0.625 | 0 | 0 | 0 | 0 | 0 | 0.133333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
ec181f20118e38869f7e360973252d13a3e81393 | 48 | py | Python | vibora/multipart/__init__.py | brettcannon/vibora | 1933b631d4df62e7d748016f7463ab746d4695cc | [
"MIT"
] | 6,238 | 2018-06-14T19:29:47.000Z | 2022-03-29T21:42:03.000Z | vibora/multipart/__init__.py | LL816/vibora | 4cda888f89aec6bfb2541ee53548ae1bf50fbf1b | [
"MIT"
] | 213 | 2018-06-13T20:13:59.000Z | 2022-03-26T07:46:49.000Z | vibora/multipart/__init__.py | LL816/vibora | 4cda888f89aec6bfb2541ee53548ae1bf50fbf1b | [
"MIT"
] | 422 | 2018-06-20T01:29:41.000Z | 2022-02-27T16:45:29.000Z | from .parser import *
from .containers import *
| 16 | 25 | 0.75 | 6 | 48 | 6 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 48 | 2 | 26 | 24 | 0.9 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
ec24a31604f87a397ddebce3737b7738ab8541c2 | 17,837 | py | Python | utils/tfrecord.py | leosampaio/scene-designer | 8a7276067acfde1997d386942aabc44d92436a1a | [
"MIT"
] | 9 | 2021-08-18T17:49:42.000Z | 2022-02-22T02:15:07.000Z | utils/tfrecord.py | leosampaio/scene-designer | 8a7276067acfde1997d386942aabc44d92436a1a | [
"MIT"
] | null | null | null | utils/tfrecord.py | leosampaio/scene-designer | 8a7276067acfde1997d386942aabc44d92436a1a | [
"MIT"
] | 1 | 2021-10-02T19:53:03.000Z | 2021-10-02T19:53:03.000Z | import tensorflow as tf
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _image_float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=value.flatten()))
def _int64_list_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def raster_sketch_example(raster_sketch, label):
feature = {
'label': _int64_feature(label),
'size': _int64_feature(raster_sketch.shape[0]),
'sketch': _image_float_feature(raster_sketch),
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def parse_raster_sketch_record(example_proto):
image_feature_description = {
'label': tf.io.FixedLenFeature([], tf.int64),
'size': tf.io.FixedLenFeature([], tf.int64),
'sketch': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
}
parsed = tf.io.parse_single_example(example_proto, image_feature_description)
sketch = tf.reshape(parsed['sketch'], [parsed['size'], parsed['size'], 1])
return sketch, parsed['label']
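# A minimal round-trip sketch of intended use (the file name below is
# hypothetical): serialize a sketch with raster_sketch_example, then read it
# back with parse_raster_sketch_record.
#
#   with tf.io.TFRecordWriter('sketches.tfrecord') as writer:
#       writer.write(raster_sketch_example(sketch, label).SerializeToString())
#   dataset = tf.data.TFRecordDataset(['sketches.tfrecord'])
#   dataset = dataset.map(parse_raster_sketch_record)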
def coco_scene_graph_example(image, objs, boxes, masks, triples, attributes, identifier):
feature = {
'image': _image_float_feature(image),
'size': _int64_feature(image.shape[0]),
'n_objs': _int64_feature(len(objs)),
'objs': _int64_list_feature(objs),
'boxes': _image_float_feature(boxes),
'mask_size': _int64_feature(masks.shape[1]),
'masks': _image_float_feature(masks),
'n_triples': _int64_feature(len(triples)),
'triples': _int64_list_feature(triples.flatten()),
'attr_size': _int64_feature(attributes.shape[1]),
'attributes': _int64_list_feature(attributes.flatten()),
'id': _int64_feature(identifier),
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def parse_coco_scene_graph_record(example_proto):
image_feature_description = {
'id': tf.io.FixedLenFeature([], tf.int64),
'n_objs': tf.io.FixedLenFeature([], tf.int64),
'n_triples': tf.io.FixedLenFeature([], tf.int64),
'size': tf.io.FixedLenFeature([], tf.int64),
'mask_size': tf.io.FixedLenFeature([], tf.int64),
'attr_size': tf.io.FixedLenFeature([], tf.int64),
'image': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'objs': tf.io.VarLenFeature(tf.int64),
'boxes': tf.io.VarLenFeature(tf.float32),
'masks': tf.io.VarLenFeature(tf.float32),
'triples': tf.io.VarLenFeature(tf.int64),
'attributes': tf.io.VarLenFeature(tf.int64),
}
parsed = tf.io.parse_single_example(example_proto, image_feature_description)
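    # The fixed-size fields below reshape directly; the VarLenFeature fields
    # come back as tf.SparseTensor, so '.values' extracts their flat values
    # before reshaping.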
image = tf.reshape(parsed['image'], [parsed['size'], parsed['size'], 3])
boxes = tf.reshape(
parsed['boxes'].values, [parsed['n_objs'], 4])
masks = tf.reshape(
parsed['masks'].values, [parsed['n_objs'], parsed['mask_size'], parsed['mask_size']])
triples = tf.reshape(
parsed['triples'].values, [parsed['n_triples'], 3])
attributes = tf.reshape(
parsed['attributes'].values, [parsed['n_objs'], parsed['attr_size']])
return parsed['n_objs'], parsed['n_triples'], image, parsed['objs'].values, boxes, masks, triples, attributes, parsed['id']
def coco_crop_example(crop, label):
feature = {
'image': _image_float_feature(crop),
'size': _int64_feature(crop.shape[0]),
'n_objs': _int64_feature(label) # typo: should be 'label'
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def parse_coco_crop_record(example_proto):
image_feature_description = {
'size': tf.io.FixedLenFeature([], tf.int64),
'image': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
        'n_objs': tf.io.FixedLenFeature([], tf.int64),  # typo: should be 'label'
}
parsed = tf.io.parse_single_example(example_proto, image_feature_description)
image = tf.reshape(parsed['image'], [parsed['size'], parsed['size'], 3])
label = parsed['n_objs']
return image, label
def sketchycoco_crop_example(crop, label, sketch):
feature = {
'image': _image_float_feature(crop),
'size': _int64_feature(crop.shape[0]),
'label': _int64_feature(label),
'sketch': _image_float_feature(sketch),
'sketch_size': _int64_feature(sketch.shape[0]),
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def parse_sketchycoco_crop_record(example_proto):
image_feature_description = {
'size': tf.io.FixedLenFeature([], tf.int64),
'image': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'label': tf.io.FixedLenFeature([], tf.int64),
'sketch': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'sketch_size': tf.io.FixedLenFeature([], tf.int64),
}
parsed = tf.io.parse_single_example(example_proto, image_feature_description)
image = tf.reshape(parsed['image'], [parsed['size'], parsed['size'], 3])
sketch = tf.reshape(parsed['sketch'], [parsed['sketch_size'], parsed['sketch_size'], 1])
return image, parsed['label'], sketch
def sketchy_example(image, sketches, label):
feature = {
'image': _image_float_feature(image),
'size': _int64_feature(image.shape[0]),
'label': _int64_feature(label),
'sketches': _image_float_feature(sketches),
'sketch_size': _int64_feature(sketches.shape[1]),
'n_sketches': _int64_feature(sketches.shape[0]),
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def parse_sketchy_record(example_proto):
image_feature_description = {
'size': tf.io.FixedLenFeature([], tf.int64),
'image': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'label': tf.io.FixedLenFeature([], tf.int64),
'sketches': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'sketch_size': tf.io.FixedLenFeature([], tf.int64),
'n_sketches': tf.io.FixedLenFeature([], tf.int64),
}
parsed = tf.io.parse_single_example(example_proto, image_feature_description)
image = tf.reshape(parsed['image'], [parsed['size'], parsed['size'], 3])
sketches = tf.reshape(parsed['sketches'], [parsed['n_sketches'], parsed['sketch_size'], parsed['sketch_size'], 1])
return image, parsed['label'], sketches
def sketchy_plus_saliency_example(image, sketches, saliency, label):
feature = {
'image': _image_float_feature(image),
'size': _int64_feature(image.shape[0]),
'label': _int64_feature(label),
'sketches': _image_float_feature(sketches),
'saliency': _image_float_feature(saliency),
'sketch_size': _int64_feature(sketches.shape[1]),
'n_sketches': _int64_feature(sketches.shape[0]),
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def parse_sketchy_plus_saliency_record(example_proto):
image_feature_description = {
'size': tf.io.FixedLenFeature([], tf.int64),
'image': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'label': tf.io.FixedLenFeature([], tf.int64),
'sketches': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'saliency': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'sketch_size': tf.io.FixedLenFeature([], tf.int64),
'n_sketches': tf.io.FixedLenFeature([], tf.int64),
}
parsed = tf.io.parse_single_example(example_proto, image_feature_description)
image = tf.reshape(parsed['image'], [parsed['size'], parsed['size'], 3])
sketches = tf.reshape(parsed['sketches'], [parsed['n_sketches'], parsed['sketch_size'], parsed['sketch_size'], 1])
saliency = tf.reshape(parsed['saliency'], [parsed['sketch_size'], parsed['sketch_size'], 1])
return image, parsed['label'], sketches, saliency
def flickr_saliency_example(image, sketch, saliency, label):
feature = {
'image': _image_float_feature(image),
'size': _int64_feature(image.shape[0]),
'label': _int64_feature(label),
'sketch': _image_float_feature(sketch),
'saliency': _image_float_feature(saliency),
'sketch_size': _int64_feature(sketch.shape[1]),
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def parse_flickr_saliency_record(example_proto):
image_feature_description = {
'size': tf.io.FixedLenFeature([], tf.int64),
'image': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'label': tf.io.FixedLenFeature([], tf.int64),
'sketch': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'saliency': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'sketch_size': tf.io.FixedLenFeature([], tf.int64),
}
parsed = tf.io.parse_single_example(example_proto, image_feature_description)
image = tf.reshape(parsed['image'], [parsed['size'], parsed['size'], 3])
sketch = 1 - tf.reshape(parsed['sketch'], [parsed['sketch_size'], parsed['sketch_size'], 1])
saliency = tf.reshape(parsed['saliency'], [parsed['sketch_size'], parsed['sketch_size'], 1])
return image, parsed['label'], sketch, saliency
def qdcoco_fg_example(image, objs, boxes, masks, triples, attributes, identifier, sketches):
feature = {
'image': _image_float_feature(image),
'size': _int64_feature(image.shape[0]),
'n_objs': _int64_feature(len(objs)),
'objs': _int64_list_feature(objs),
'boxes': _image_float_feature(boxes),
'mask_size': _int64_feature(masks.shape[1]),
'masks': _image_float_feature(masks),
'n_triples': _int64_feature(len(triples)),
'triples': _int64_list_feature(triples.flatten()),
'attr_size': _int64_feature(attributes.shape[1]),
'attributes': _int64_list_feature(attributes.flatten()),
'id': _int64_feature(identifier),
'sketches': _image_float_feature(sketches),
'sketch_size': _int64_feature(sketches.shape[2]),
'n_sketches': _int64_feature(sketches.shape[1]),
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def parse_qdcoco_fg_record(example_proto):
image_feature_description = {
'id': tf.io.FixedLenFeature([], tf.int64),
'n_objs': tf.io.FixedLenFeature([], tf.int64),
'n_triples': tf.io.FixedLenFeature([], tf.int64),
'size': tf.io.FixedLenFeature([], tf.int64),
'mask_size': tf.io.FixedLenFeature([], tf.int64),
'attr_size': tf.io.FixedLenFeature([], tf.int64),
'image': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'objs': tf.io.VarLenFeature(tf.int64),
'boxes': tf.io.VarLenFeature(tf.float32),
'masks': tf.io.VarLenFeature(tf.float32),
'triples': tf.io.VarLenFeature(tf.int64),
'attributes': tf.io.VarLenFeature(tf.int64),
'sketches': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'sketch_size': tf.io.FixedLenFeature([], tf.int64),
'n_sketches': tf.io.FixedLenFeature([], tf.int64),
}
parsed = tf.io.parse_single_example(example_proto, image_feature_description)
image = tf.reshape(parsed['image'], [parsed['size'], parsed['size'], 3])
boxes = tf.reshape(
parsed['boxes'].values, [parsed['n_objs'], 4])
masks = tf.reshape(
parsed['masks'].values, [parsed['n_objs'], parsed['mask_size'], parsed['mask_size']])
triples = tf.reshape(
parsed['triples'].values, [parsed['n_triples'], 3])
attributes = tf.reshape(
parsed['attributes'].values, [parsed['n_objs'], parsed['attr_size']])
sketches = tf.reshape(parsed['sketches'], [parsed['n_objs'], parsed['n_sketches'], parsed['sketch_size'], parsed['sketch_size'], 1])
return parsed['n_objs'], parsed['n_triples'], image, parsed['objs'].values, boxes, masks, triples, attributes, parsed['id'], sketches
def sketchycoco_scene_graph_example(image, objs, boxes, masks, triples, attributes, identifier, sketches):
feature = {
'image': _image_float_feature(image),
'size': _int64_feature(image.shape[0]),
'n_objs': _int64_feature(len(objs)),
'objs': _int64_list_feature(objs),
'boxes': _image_float_feature(boxes),
'mask_size': _int64_feature(masks.shape[1]),
'masks': _image_float_feature(masks),
'n_triples': _int64_feature(len(triples)),
'triples': _int64_list_feature(triples.flatten()),
'attr_size': _int64_feature(attributes.shape[1]),
'attributes': _int64_list_feature(attributes.flatten()),
'id': _int64_feature(identifier),
'sketches': _image_float_feature(sketches),
'sketch_size': _int64_feature(sketches.shape[1]),
'n_sketches': _int64_feature(sketches.shape[0]),
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def parse_sketchycoco_scene_graph_record(example_proto):
image_feature_description = {
'id': tf.io.FixedLenFeature([], tf.int64),
'n_objs': tf.io.FixedLenFeature([], tf.int64),
'n_triples': tf.io.FixedLenFeature([], tf.int64),
'size': tf.io.FixedLenFeature([], tf.int64),
'mask_size': tf.io.FixedLenFeature([], tf.int64),
'attr_size': tf.io.FixedLenFeature([], tf.int64),
'image': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'objs': tf.io.VarLenFeature(tf.int64),
'boxes': tf.io.VarLenFeature(tf.float32),
'masks': tf.io.VarLenFeature(tf.float32),
'triples': tf.io.VarLenFeature(tf.int64),
'attributes': tf.io.VarLenFeature(tf.int64),
'sketches': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'sketch_size': tf.io.FixedLenFeature([], tf.int64),
'n_sketches': tf.io.FixedLenFeature([], tf.int64),
}
parsed = tf.io.parse_single_example(example_proto, image_feature_description)
image = tf.reshape(parsed['image'], [parsed['size'], parsed['size'], 3])
boxes = tf.reshape(
parsed['boxes'].values, [parsed['n_objs'], 4])
masks = tf.reshape(
parsed['masks'].values, [parsed['n_objs'], parsed['mask_size'], parsed['mask_size']])
triples = tf.reshape(
parsed['triples'].values, [parsed['n_triples'], 3])
attributes = tf.reshape(
parsed['attributes'].values, [parsed['n_objs'], parsed['attr_size']])
sketches = tf.reshape(parsed['sketches'], [parsed['n_objs'], parsed['sketch_size'], parsed['sketch_size'], 1])
return parsed['n_objs'], parsed['n_triples'], image, parsed['objs'].values, boxes, masks, triples, attributes, parsed['id'], sketches
def token_sketch_example(sketch, label, patch_labels):
feature = {
'label': _int64_feature(label),
'size': _int64_feature(sketch.shape[0]),
'sketch': _image_float_feature(sketch),
'patch_labels': _int64_list_feature(patch_labels),
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def parse_token_sketch_record(example_proto):
image_feature_description = {
'label': tf.io.FixedLenFeature([], tf.int64),
'size': tf.io.FixedLenFeature([], tf.int64),
'sketch': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'patch_labels': tf.io.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
}
parsed = tf.io.parse_single_example(example_proto, image_feature_description)
sketch = tf.reshape(parsed['sketch'], [parsed['size'], parsed['size'], 1])
return sketch, parsed['label'], parsed['patch_labels']
def gram_matrices_example(g0, g1, g2, g3, g4):
feature = {
'g0': _image_float_feature(g0),
'g0_size': _int64_feature(g0.shape[-1]),
'g1': _image_float_feature(g1),
'g1_size': _int64_feature(g1.shape[-1]),
'g2': _image_float_feature(g2),
'g2_size': _int64_feature(g2.shape[-1]),
'g3': _image_float_feature(g3),
'g3_size': _int64_feature(g3.shape[-1]),
'g4': _image_float_feature(g4),
'g4_size': _int64_feature(g4.shape[-1]),
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def parse_gram_matrices_record(example_proto):
image_feature_description = {
'g0': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'g0_size': tf.io.FixedLenFeature([], tf.int64),
'g1': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'g1_size': tf.io.FixedLenFeature([], tf.int64),
'g2': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'g2_size': tf.io.FixedLenFeature([], tf.int64),
'g3': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'g3_size': tf.io.FixedLenFeature([], tf.int64),
'g4': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'g4_size': tf.io.FixedLenFeature([], tf.int64),
}
parsed = tf.io.parse_single_example(example_proto, image_feature_description)
return parsed['g0'], parsed['g1'], parsed['g2'], parsed['g3'], parsed['g4']
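

# Hedged smoke test (not part of the original module): writes one crop record
# to a throwaway file and reads it back through parse_coco_crop_record. It
# only runs when this file is executed directly, never on import.
if __name__ == "__main__":
    import os
    import tempfile

    import numpy as np

    crop = np.random.rand(64, 64, 3).astype(np.float32)
    path = os.path.join(tempfile.mkdtemp(), "crops.tfrecord")
    with tf.io.TFRecordWriter(path) as writer:
        writer.write(coco_crop_example(crop, label=7).SerializeToString())
    dataset = tf.data.TFRecordDataset([path]).map(parse_coco_crop_record)
    for image, label in dataset:
        print(image.shape, int(label))  # expect: (64, 64, 3) 7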
| 45.156962 | 137 | 0.665022 | 2,142 | 17,837 | 5.298319 | 0.048086 | 0.034188 | 0.078685 | 0.086968 | 0.894792 | 0.894264 | 0.872588 | 0.863688 | 0.832849 | 0.825095 | 0 | 0.026828 | 0.170376 | 17,837 | 394 | 138 | 45.271574 | 0.7401 | 0.016034 | 0 | 0.695385 | 0 | 0 | 0.106801 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.003077 | 0 | 0.163077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
ec31a3aec2c696002ce3a0332722195b8ee299b3 | 120 | py | Python | test/__init__.py | mariushoch/wikidata-dump-generation-smoke-tests | d4f566704e602ab74d44246ba7bc15e732bffe38 | [
"BSD-3-Clause"
] | null | null | null | test/__init__.py | mariushoch/wikidata-dump-generation-smoke-tests | d4f566704e602ab74d44246ba7bc15e732bffe38 | [
"BSD-3-Clause"
] | null | null | null | test/__init__.py | mariushoch/wikidata-dump-generation-smoke-tests | d4f566704e602ab74d44246ba7bc15e732bffe38 | [
"BSD-3-Clause"
] | null | null | null | from .TestDumpListingReader import TestDumpListingReader
from .TestDumpListingValidator import TestDumpListingValidator
| 40 | 62 | 0.916667 | 8 | 120 | 13.75 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.066667 | 120 | 2 | 63 | 60 | 0.982143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6b6da3d9aa36b2a20c612a43c77c4ee86428046d | 107 | py | Python | models/__init__.py | ben-oxley/santa-shares-server | c6bb79c82f2988fd7041d1db63d89e6549c65a2a | [
"MIT"
] | null | null | null | models/__init__.py | ben-oxley/santa-shares-server | c6bb79c82f2988fd7041d1db63d89e6549c65a2a | [
"MIT"
] | 1 | 2019-12-13T23:06:50.000Z | 2019-12-13T23:06:50.000Z | models/__init__.py | ben-oxley/santa-shares-server | c6bb79c82f2988fd7041d1db63d89e6549c65a2a | [
"MIT"
] | 3 | 2019-12-13T22:47:00.000Z | 2019-12-22T11:42:29.000Z | from .item import Item
from .user_item import UserItem
from .user import User
from .user_log import UserLog | 26.75 | 31 | 0.82243 | 18 | 107 | 4.777778 | 0.388889 | 0.27907 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.140187 | 107 | 4 | 32 | 26.75 | 0.934783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6b92e58880e191e798fc02ff097d338ad14e9b0c | 24,614 | py | Python | tests/test_mapping_file_mapper_base.py | jermnelson/MARC21-To-FOLIO | 4c598255b6b537e17b79c8921ed6f99877e7f72b | [
"MIT"
] | null | null | null | tests/test_mapping_file_mapper_base.py | jermnelson/MARC21-To-FOLIO | 4c598255b6b537e17b79c8921ed6f99877e7f72b | [
"MIT"
] | null | null | null | tests/test_mapping_file_mapper_base.py | jermnelson/MARC21-To-FOLIO | 4c598255b6b537e17b79c8921ed6f99877e7f72b | [
"MIT"
] | null | null | null | from unittest.mock import MagicMock
from unittest.mock import Mock
from folio_migration_tools.library_configuration import LibraryConfiguration
from folio_migration_tools.mapping_file_transformation.mapping_file_mapper_base import (
MappingFileMapperBase,
)
from folio_migration_tools.migration_tasks.items_transformer import ItemsTransformer
from folio_uuid.folio_namespaces import FOLIONamespaces
from folioclient import FolioClient
# flake8: noqa
class MyTestableFileMapper(MappingFileMapperBase):
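    """Concrete mapper for these tests: the FOLIO client and library
    configuration are mocked out, and get_prop resolves values straight
    from the mapping file definition and the legacy record."""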
def __init__(self, schema: dict, record_map: dict):
mock_conf = Mock(spec=LibraryConfiguration)
mock_folio = Mock(spec=FolioClient)
mock_folio.okapi_url = "okapi_url"
mock_folio.folio_get_single_object = MagicMock(
return_value={
"instances": {"prefix": "pref", "startNumber": "1"},
"holdings": {"prefix": "pref", "startNumber": "1"},
}
)
super().__init__(
mock_folio,
schema,
record_map,
None,
FOLIONamespaces.holdings,
mock_conf,
)
def get_prop(self, legacy_item, folio_prop_name, index_or_id):
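        # Test implementation: return the statically mapped value when the
        # mapping defines one; otherwise join the values of all mapped
        # legacy fields.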
legacy_item_keys = self.mapped_from_legacy_data.get(folio_prop_name, [])
if len(legacy_item_keys) == 1 and folio_prop_name in self.mapped_from_values:
return self.mapped_from_values.get(folio_prop_name, "")
legacy_values = MappingFileMapperBase.get_legacy_vals(legacy_item, legacy_item_keys)
return " ".join(legacy_values).strip()
def test_validate_required_properties_sub_pro_missing_uri():
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "A holdings record",
"type": "object",
"required": ["title"],
"properties": {
"formerIds": {
"type": "array",
"description": "Previous ID(s) assigned to the holdings record",
"items": {"type": "string"},
"uniqueItems": True,
},
"title": {
"type": "string",
"description": "",
},
"subtitle": {
"type": "string",
"description": "",
},
"electronicAccess": {
"description": "List of electronic access items",
"type": "array",
"items": {
"type": "object",
"properties": {
"uri": {
"type": "string",
"description": "uniform resource identifier (URI) is a string of characters designed for unambiguous identification of resources",
},
"relationshipId": {
"type": "string",
"description": "relationship between the electronic resource at the location identified and the item described in the record as a whole",
},
},
"additionalProperties": False,
"required": ["uri"],
},
},
},
}
fake_holdings_map = {
"data": [
{
"folio_field": "title",
"legacy_field": "title_",
"value": "",
"description": "",
},
{
"folio_field": "legacyIdentifier",
"legacy_field": "id",
"value": "",
"description": "",
},
{
"folio_field": "subtitle",
"legacy_field": "subtitle_",
"value": "",
"description": "",
},
{
"folio_field": "formerIds[0]",
"legacy_field": "formerIds_1",
"value": "",
"description": "",
},
{
"folio_field": "formerIds[1]",
"legacy_field": "formerIds_2",
"value": "",
"description": "",
},
{
"folio_field": "electronicAccess[0].relationshipId",
"legacy_field": "",
"value": "f5d0068e-6272-458e-8a81-b85e7b9a14aa",
"description": "",
},
{
"folio_field": "electronicAccess[0].uri",
"legacy_field": "link_",
"value": "",
"description": "",
},
{
"folio_field": "electronicAccess[1].relationshipId",
"legacy_field": "",
"value": "f5d0068e-000-458e-8a81-b85e7b9a14aa",
"description": "",
},
{
"folio_field": "electronicAccess[1].uri",
"legacy_field": "link_2",
"value": "",
"description": "",
},
]
}
record = {
"link_": "some_link",
"formerIds_1": "id1",
"formerIds_2": "id2",
"title_": "actual value",
"subtitle_": "object",
"link_2": "",
"id": "11",
}
tfm = MyTestableFileMapper(schema, fake_holdings_map)
folio_rec, folio_id = tfm.do_map(record, record["id"], FOLIONamespaces.holdings)
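    # link_2 is empty, so electronicAccess[1] lacks its required 'uri' and
    # should be pruned, leaving a single entry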
assert len(folio_rec["electronicAccess"]) == 1
assert folio_id == "11"
assert folio_rec["id"] == "f00d59ac-4cfc-56d6-9c62-dc9084c18003"
def test_validate_required_properties_sub_pro_missing_uri_and_more():
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "A holdings record",
"type": "object",
"required": ["title"],
"properties": {
"formerIds": {
"type": "array",
"description": "Previous ID(s) assigned to the holdings record",
"items": {"type": "string"},
"uniqueItems": True,
},
"title": {
"type": "string",
"description": "",
},
"subtitle": {
"type": "string",
"description": "",
},
"electronicAccess": {
"description": "List of electronic access items",
"type": "array",
"items": {
"type": "object",
"properties": {
"uri": {
"type": "string",
"description": "uniform resource identifier (URI) is a string of characters designed for unambiguous identification of resources",
},
"relationshipId": {
"type": "string",
"description": "relationship between the electronic resource at the location identified and the item described in the record as a whole",
},
"third_prop": {
"type": "string",
"description": "relationship between the electronic resource at the location identified and the item described in the record as a whole",
},
},
"additionalProperties": False,
"required": ["uri"],
},
},
},
}
fake_holdings_map = {
"data": [
{
"folio_field": "title",
"legacy_field": "title_",
"value": "",
"description": "",
},
{
"folio_field": "legacyIdentifier",
"legacy_field": "id",
"value": "",
"description": "",
},
{
"folio_field": "subtitle",
"legacy_field": "subtitle_",
"value": "",
"description": "",
},
{
"folio_field": "formerIds[0]",
"legacy_field": "formerIds_1",
"value": "",
"description": "",
},
{
"folio_field": "formerIds[1]",
"legacy_field": "formerIds_2",
"value": "",
"description": "",
},
{
"folio_field": "electronicAccess[0].relationshipId",
"legacy_field": "",
"value": "f5d0068e-6272-458e-8a81-b85e7b9a14aa",
"description": "",
},
{
"folio_field": "electronicAccess[0].third_prop",
"legacy_field": "third_0",
"value": "",
"description": "",
},
{
"folio_field": "electronicAccess[0].uri",
"legacy_field": "link_",
"value": "",
"description": "",
},
{
"folio_field": "electronicAccess[1].relationshipId",
"legacy_field": "",
"value": "f5d0068e-000-458e-8a81-b85e7b9a14aa",
"description": "",
},
{
"folio_field": "electronicAccess[1].uri",
"legacy_field": "link_2",
"value": "",
"description": "",
},
{
"folio_field": "electronicAccess[1].third_prop",
"legacy_field": "third_",
"value": "",
"description": "",
},
]
}
record = {
"link_": "some_link",
"formerIds_1": "id1",
"formerIds_2": "id2",
"title_": "actual value",
"subtitle_": "object",
"link_2": "",
"id": "11",
"third_0": "",
"third_1": "",
}
tfm = MyTestableFileMapper(schema, fake_holdings_map)
folio_rec, folio_id = tfm.do_map(record, record["id"], FOLIONamespaces.holdings)
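    # As above, the entry built from the empty link_2 is missing its required
    # 'uri' and should be dropped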
assert len(folio_rec["electronicAccess"]) == 1
def test_validate_required_properties_item_notes():
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "A holdings record",
"type": "object",
"required": [],
"properties": {
"notes": {
"type": "array",
"description": "Notes about action, copy, binding etc.",
"items": {
"type": "object",
"properties": {
"itemNoteTypeId": {
"type": "string",
"description": "ID of the type of note",
},
"itemNoteType": {
"description": "Type of item's note",
"type": "object",
"folio:$ref": "itemnotetype.json",
"javaType": "org.folio.rest.jaxrs.model.itemNoteTypeVirtual",
"readonly": True,
"folio:isVirtual": True,
"folio:linkBase": "item-note-types",
"folio:linkFromField": "itemNoteTypeId",
"folio:linkToField": "id",
"folio:includedElement": "itemNoteTypes.0",
},
"note": {
"type": "string",
"description": "Text content of the note",
},
"staffOnly": {
"type": "boolean",
"description": "If true, determines that the note should not be visible for others than staff",
"default": False,
},
},
},
},
},
}
fake_holdings_map = {
"data": [
{
"folio_field": "notes[0].note",
"legacy_field": "note_1",
"value": "",
"description": "",
},
{
"folio_field": "notes[0].staffOnly",
"legacy_field": "",
"value": True,
"description": "",
},
{
"folio_field": "notes[0].itemNoteTypeId",
"legacy_field": "",
"value": "A UUID",
"description": "",
},
{
"folio_field": "notes[1].note",
"legacy_field": "note_2",
"value": "",
"description": "",
},
{
"folio_field": "notes[1].staffOnly",
"legacy_field": "",
"value": False,
"description": "",
},
{
"folio_field": "notes[1].itemNoteTypeId",
"legacy_field": "",
"value": "Another UUID",
"description": "",
},
{
"folio_field": "legacyIdentifier",
"legacy_field": "id",
"value": "",
"description": "",
},
]
}
record = {"note_1": "my note", "note_2": "", "id": "12"}
tfm = MyTestableFileMapper(schema, fake_holdings_map)
folio_rec, folio_id = tfm.do_map(record, record["id"], FOLIONamespaces.holdings)
ItemsTransformer.handle_notes(folio_rec)
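# "note_2" is an empty string in the record, so handle_notes is expected to
# drop the second note and keep only notes[0].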
assert len(folio_rec["notes"]) == 1


def test_validate_required_properties_item_notes_unmapped():
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "A holdings record",
"type": "object",
"required": [],
"properties": {
"notes": {
"type": "array",
"description": "Notes about action, copy, binding etc.",
"items": {
"type": "object",
"properties": {
"itemNoteTypeId": {
"type": "string",
"description": "ID of the type of note",
},
"itemNoteType": {
"description": "Type of item's note",
"type": "object",
"folio:$ref": "itemnotetype.json",
"javaType": "org.folio.rest.jaxrs.model.itemNoteTypeVirtual",
"readonly": True,
"folio:isVirtual": True,
"folio:linkBase": "item-note-types",
"folio:linkFromField": "itemNoteTypeId",
"folio:linkToField": "id",
"folio:includedElement": "itemNoteTypes.0",
},
"note": {
"type": "string",
"description": "Text content of the note",
},
"staffOnly": {
"type": "boolean",
"description": "If true, determines that the note should not be visible for others than staff",
"default": False,
},
},
},
},
},
}
fake_holdings_map = {
"data": [
{
"folio_field": "notes[0].note",
"legacy_field": "note_1",
"value": "",
"description": "",
},
{
"folio_field": "notes[0].staffOnly",
"legacy_field": "",
"value": True,
"description": "",
},
{
"folio_field": "notes[0].itemNoteTypeId",
"legacy_field": "",
"value": "A UUID",
"description": "",
},
{
"folio_field": "notes[1].note",
"legacy_field": "Not mapped",
"value": "",
"description": "",
},
{
"folio_field": "notes[1].staffOnly",
"legacy_field": "",
"value": False,
"description": "",
},
{
"folio_field": "notes[1].itemNoteTypeId",
"legacy_field": "",
"value": "UUID",
"description": "",
},
{
"folio_field": "legacyIdentifier",
"legacy_field": "id",
"value": "",
"description": "",
},
]
}
record = {"note_1": "my note", "id": "12"}
tfm = MyTestableFileMapper(schema, fake_holdings_map)
folio_rec, folio_id = tfm.do_map(record, record["id"], FOLIONamespaces.holdings)
ItemsTransformer.handle_notes(folio_rec)
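# notes[1].note is mapped from "Not mapped", which does not exist in the
# record, so only the first note should survive handle_notes.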
assert len(folio_rec["notes"]) == 1


def test_validate_required_properties_item_notes_unmapped_2():
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "A holdings record",
"type": "object",
"required": [],
"properties": {
"notes": {
"type": "array",
"description": "Notes about action, copy, binding etc.",
"items": {
"type": "object",
"properties": {
"itemNoteTypeId": {
"type": "string",
"description": "ID of the type of note",
},
"itemNoteType": {
"description": "Type of item's note",
"type": "object",
"folio:$ref": "itemnotetype.json",
"javaType": "org.folio.rest.jaxrs.model.itemNoteTypeVirtual",
"readonly": True,
"folio:isVirtual": True,
"folio:linkBase": "item-note-types",
"folio:linkFromField": "itemNoteTypeId",
"folio:linkToField": "id",
"folio:includedElement": "itemNoteTypes.0",
},
"note": {
"type": "string",
"description": "Text content of the note",
},
"staffOnly": {
"type": "boolean",
"description": "If true, determines that the note should not be visible for others than staff",
"default": False,
},
},
},
},
},
}
fake_holdings_map = {
"data": [
{
"folio_field": "notes[0].note",
"legacy_field": "note_1",
"value": "",
"description": "",
},
{
"folio_field": "notes[0].staffOnly",
"legacy_field": "",
"value": True,
"description": "",
},
{
"folio_field": "notes[0].itemNoteTypeId",
"legacy_field": "",
"value": "A UUID",
"description": "",
},
{
"folio_field": "notes[1].note",
"legacy_field": "Not mapped",
"value": "",
"description": "",
},
{
"folio_field": "notes[1].staffOnly",
"legacy_field": "Not mapped",
"value": "",
"description": "",
},
{
"folio_field": "notes[1].itemNoteTypeId",
"legacy_field": "Not mapped",
"value": "",
"description": "",
},
{
"folio_field": "legacyIdentifier",
"legacy_field": "id",
"value": "",
"description": "",
},
]
}
record = {"note_1": "my note", "id": "12"}
tfm = MyTestableFileMapper(schema, fake_holdings_map)
folio_rec, folio_id = tfm.do_map(record, record["id"], FOLIONamespaces.holdings)
ItemsTransformer.handle_notes(folio_rec)
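# Every field of notes[1] is unmapped or empty, so the mapper plus
# handle_notes should leave only notes[0].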
assert len(folio_rec["notes"]) == 1


def test_validate_required_properties_obj():
schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "A holdings record",
"type": "object",
"required": ["title"],
"properties": {
"formerIds": {
"type": "array",
"description": "Previous ID(s) assigned to the holdings record",
"items": {"type": "string"},
"uniqueItems": True,
},
"title": {
"type": "string",
"description": "",
},
"subtitle": {
"type": "string",
"description": "",
},
"electronicAccessObj": {
"type": "object",
"properties": {
"uri": {
"type": "string",
"description": "uniform resource identifier (URI) is a string of characters designed for unambiguous identification of resources",
},
"relationshipId": {
"type": "string",
"description": "relationship between the electronic resource at the location identified and the item described in the record as a whole",
},
"third_prop": {
"type": "string",
"description": "relationship between the electronic resource at the location identified and the item described in the record as a whole",
},
},
"additionalProperties": False,
"required": ["uri"],
},
},
}
fake_holdings_map = {
"data": [
{
"folio_field": "title",
"legacy_field": "title_",
"value": "",
"description": "",
},
{
"folio_field": "legacyIdentifier",
"legacy_field": "id",
"value": "",
"description": "",
},
{
"folio_field": "subtitle",
"legacy_field": "subtitle_",
"value": "",
"description": "",
},
{
"folio_field": "formerIds[0]",
"legacy_field": "formerIds_1",
"value": "",
"description": "",
},
{
"folio_field": "formerIds[1]",
"legacy_field": "formerIds_2",
"value": "",
"description": "",
},
{
"folio_field": "electronicAccessObj.relationshipId",
"legacy_field": "",
"value": "f5d0068e-6272-458e-8a81-b85e7b9a14aa",
"description": "",
},
{
"folio_field": "electronicAccessObj.third_prop",
"legacy_field": "third_0",
"value": "",
"description": "",
},
{
"folio_field": "electronicAccessObj.uri",
"legacy_field": "link_",
"value": "",
"description": "",
},
]
}
record = {
"link_": "some_link",
"formerIds_1": "id1",
"formerIds_2": "id2",
"title_": "actual value",
"subtitle_": "object",
"id": "11",
"third_0": "",
}
tfm = MyTestableFileMapper(schema, fake_holdings_map)
folio_rec, folio_id = tfm.do_map(record, record["id"], FOLIONamespaces.holdings)
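# "link_" resolves to "some_link" in the record, so the nested
# electronicAccessObj should carry that value in its required "uri" field.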
assert folio_rec["electronicAccessObj"]["uri"] == "some_link"
| 35.98538 | 165 | 0.410945 | 1,684 | 24,614 | 5.81829 | 0.116983 | 0.05001 | 0.092162 | 0.074301 | 0.875485 | 0.868953 | 0.86436 | 0.86436 | 0.856603 | 0.846601 | 0 | 0.017248 | 0.455879 | 24,614 | 683 | 166 | 36.038067 | 0.714328 | 0.000488 | 0 | 0.660661 | 0 | 0 | 0.330976 | 0.037724 | 0 | 0 | 0 | 0 | 0.012012 | 1 | 0.012012 | false | 0 | 0.010511 | 0 | 0.027027 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6bc46672313bc2ba0395c8e062756f3ab131442d | 370 | py | Python | platform/software/lib/ResourceManagerInfo.py | oika/connect | 2486b97256d7adcd130f90d5c3e665d90ef1a39d | [
"Apache-2.0"
] | null | null | null | platform/software/lib/ResourceManagerInfo.py | oika/connect | 2486b97256d7adcd130f90d5c3e665d90ef1a39d | [
"Apache-2.0"
] | null | null | null | platform/software/lib/ResourceManagerInfo.py | oika/connect | 2486b97256d7adcd130f90d5c3e665d90ef1a39d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
class ResourceManagerInfo:
def __init__(self, manager_address, manager_port):
self.__manager_address = manager_address
self.__manager_port = manager_port
@property
def manager_address(self):
return self.__manager_address
@property
def manager_port(self):
return self.__manager_port
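# Minimal usage sketch (the address and port values are illustrative):
# info = ResourceManagerInfo("10.0.0.1", 8080)
# assert info.manager_address == "10.0.0.1" and info.manager_port == 8080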
| 23.125 | 54 | 0.675676 | 40 | 370 | 5.7 | 0.325 | 0.241228 | 0.236842 | 0.219298 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003584 | 0.245946 | 370 | 15 | 55 | 24.666667 | 0.81362 | 0.056757 | 0 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.3 | false | 0 | 0 | 0.2 | 0.6 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
d412712039e25932bc0ed2a2880c6ec0fdd930c7 | 84 | py | Python | nr_common/blueprints/job_status/__init__.py | nitred/nr-common | f251e76fe10cb46f609583922d485013f5cba92b | [
"MIT"
] | null | null | null | nr_common/blueprints/job_status/__init__.py | nitred/nr-common | f251e76fe10cb46f609583922d485013f5cba92b | [
"MIT"
] | 1 | 2018-01-07T19:03:35.000Z | 2018-01-07T19:03:35.000Z | nr_common/blueprints/job_status/__init__.py | nitred/nr-common | f251e76fe10cb46f609583922d485013f5cba92b | [
"MIT"
] | 1 | 2018-09-20T02:31:18.000Z | 2018-09-20T02:31:18.000Z | """Initialize."""
from .job_status import job_status_handler
from .models import db
| 21 | 42 | 0.785714 | 12 | 84 | 5.25 | 0.666667 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107143 | 84 | 3 | 43 | 28 | 0.84 | 0.130952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
d4256e2cb5ed5d0c754292c245a70d5175aab06e | 34 | py | Python | pyecwid/validators/__init__.py | DanPalmz/pyecwid | 03fb3077a5aeda62e6aa4be2a6eae5e161be1e23 | [
"MIT"
] | 3 | 2021-07-29T17:00:42.000Z | 2021-11-05T13:35:21.000Z | pyecwid/validators/__init__.py | DanPalmz/pyecwid | 03fb3077a5aeda62e6aa4be2a6eae5e161be1e23 | [
"MIT"
] | 2 | 2021-04-22T04:27:15.000Z | 2021-04-26T02:49:38.000Z | pyecwid/validators/__init__.py | DanPalmz/pyecwid | 03fb3077a5aeda62e6aa4be2a6eae5e161be1e23 | [
"MIT"
] | 1 | 2021-07-08T01:41:27.000Z | 2021-07-08T01:41:27.000Z | from . import paramater_validators | 34 | 34 | 0.882353 | 4 | 34 | 7.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088235 | 34 | 1 | 34 | 34 | 0.935484 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2e1fc68d61632d0ea65add6a01bfdc6151b69fef | 439 | py | Python | pytest_kafka/__init__.py | karolinepauls/pytest-kafka | 9a91408f8de0f841b3da2e077fc50eae47282771 | [
"MIT"
] | 1 | 2019-10-25T07:12:37.000Z | 2019-10-25T07:12:37.000Z | pytest_kafka/__init__.py | karolinepauls/pytest-kafka | 9a91408f8de0f841b3da2e077fc50eae47282771 | [
"MIT"
] | null | null | null | pytest_kafka/__init__.py | karolinepauls/pytest-kafka | 9a91408f8de0f841b3da2e077fc50eae47282771 | [
"MIT"
] | null | null | null | """Pytest-kafka public API."""
from pytest_kafka._factories import (
make_zookeeper_process, make_kafka_server, make_kafka_consumer, terminate,
KAFKA_SERVER_CONFIG_TEMPLATE, ZOOKEEPER_CONFIG_TEMPLATE, DEFAULT_CONSUMER_TIMEOUT_MS,
)
__all__ = [
'make_zookeeper_process', 'make_kafka_server', 'make_kafka_consumer', 'terminate',
'KAFKA_SERVER_CONFIG_TEMPLATE', 'ZOOKEEPER_CONFIG_TEMPLATE', 'DEFAULT_CONSUMER_TIMEOUT_MS',
]
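# Hedged usage sketch: the factories build pytest fixtures; the binary paths
# below are assumptions for illustration, not shipped with this package.
# zookeeper_proc = make_zookeeper_process('/opt/kafka/bin/zookeeper-server-start.sh')
# kafka_server = make_kafka_server('/opt/kafka/bin/kafka-server-start.sh', 'zookeeper_proc')
# kafka_consumer = make_kafka_consumer('kafka_server', seek_to_end=True)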
| 36.583333 | 95 | 0.8041 | 52 | 439 | 6.134615 | 0.365385 | 0.112853 | 0.125392 | 0.15047 | 0.833856 | 0.833856 | 0.833856 | 0.833856 | 0.833856 | 0.833856 | 0 | 0 | 0.100228 | 439 | 11 | 96 | 39.909091 | 0.807595 | 0.05467 | 0 | 0 | 0 | 0 | 0.359413 | 0.249389 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5cfe4dfa7a52b1adf17f6f3093da56f236966d84 | 3,471 | py | Python | app/reservation/serializer/payment.py | maro99/yapen | 0de7aa9d4b152aadd18511be6e536e89645452d9 | [
"MIT"
] | 1 | 2019-04-28T12:21:51.000Z | 2019-04-28T12:21:51.000Z | app/reservation/serializer/payment.py | maro99/yapen | 0de7aa9d4b152aadd18511be6e536e89645452d9 | [
"MIT"
] | 5 | 2018-07-30T05:44:44.000Z | 2020-06-05T18:56:41.000Z | app/reservation/serializer/payment.py | maro99/yapen | 0de7aa9d4b152aadd18511be6e536e89645452d9 | [
"MIT"
] | 5 | 2018-07-23T05:21:41.000Z | 2018-08-08T05:00:42.000Z |
import datetime
from rest_framework import serializers
from location.models import Room
from members.models import User
from reservation.models import Reservation
from reservation.serializer.reservation import ReservationSerializer
#
# class ReservationPaySerializer(ReservationSerializer):
#
# class Meta(ReservationSerializer):
#
# fields = ReservationSerializer.Meta.fields + (
# 'room',
# 'user',
# 'checkin_date',
# 'checkout_date',
# 'reservation_price',
# )
#
# def create(self,validated_data):
# list1 = validated_data['checkin_date'].split('-')
# year = int(list1[0])
# month = int(list1[1])
# day = int(list1[2])
# target_date1 = datetime.date(year, month, day)
#
# list2 = validated_data['checkout_date'].split('-')
# year = int(list2[0])
# month = int(list2[1])
# day = int(list2[2])
# target_date2 = datetime.date(year, month, day)
# reservation = Reservation.objects.create(
# room = Room.objects.get(pk=int(validated_data['room'])),
# user = User.objects.get(pk=int(validated_data['user'])),
# checkin_date = target_date1,
# checkout_date = target_date2,
# reservation_price = int(validated_data['reservation_price'])
# )
# return validated_data
# Built without inheriting from the existing reservation serializer.
class ReservationPaySerializer(serializers.ModelSerializer):
class Meta:
model = Reservation
fields = (
'checkin_date',
'checkout_date',
'room',
'user',
'reservation_price',
)
def create(self, validated_data):
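# Dates arrive as raw 'YYYY-MM-DD' strings and are split by hand into
# datetime.date objects; datetime.date.fromisoformat would be an equivalent
# shortcut for zero-padded inputs.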
list1 = validated_data['checkin_date'].split('-')
year = int(list1[0])
month = int(list1[1])
day = int(list1[2])
target_date1 = datetime.date(year, month, day)
list2 = validated_data['checkout_date'].split('-')
year = int(list2[0])
month = int(list2[1])
day = int(list2[2])
target_date2 = datetime.date(year, month, day)
print(target_date1)
print(target_date2)
reservation = Reservation.objects.create(
room=Room.objects.get(pk=int(validated_data['room'])),
user=User.objects.get(pk=int(validated_data['user'])),
checkin_date=target_date1,
checkout_date=target_date2,
reservation_price=int(validated_data['reservation_price'])
)
reservation.save()
return validated_data
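# Note: create() parses raw date strings and string primary keys, so it
# assumes the untouched request payload is passed in, not DRF-validated data
# (where dates and relations would already be coerced to objects).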
| 19.834286 | 70 | 0.687698 | 403 | 3,471 | 5.766749 | 0.129032 | 0.11747 | 0.061962 | 0.041308 | 0.880809 | 0.880809 | 0.880809 | 0.880809 | 0.880809 | 0.880809 | 0 | 0.019417 | 0.169116 | 3,471 | 174 | 71 | 19.948276 | 0.786408 | 0.599539 | 0 | 0 | 0 | 0 | 0.080315 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.157895 | 0 | 0.263158 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
cf1a80bd92809b2db7d34956cdbde3666a84771c | 35 | py | Python | teitoku/__init__.py | yukinotenshi/teitoku | adb54fb7f709e0bac0da6d6f6f8aa00702c2f9c5 | [
"MIT"
] | null | null | null | teitoku/__init__.py | yukinotenshi/teitoku | adb54fb7f709e0bac0da6d6f6f8aa00702c2f9c5 | [
"MIT"
] | null | null | null | teitoku/__init__.py | yukinotenshi/teitoku | adb54fb7f709e0bac0da6d6f6f8aa00702c2f9c5 | [
"MIT"
] | 1 | 2020-01-25T10:53:44.000Z | 2020-01-25T10:53:44.000Z | from teitoku.teitoku import Teitoku | 35 | 35 | 0.885714 | 5 | 35 | 6.2 | 0.6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085714 | 35 | 1 | 35 | 35 | 0.96875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
d86432e8599517c300549c8a5da8d58c3d1b2341 | 43 | py | Python | slimmingFile/test.py | FisherEat/python_ios_projects | 1d1bba34d31ff31b732b5ae573c0eca0a4a0fb7b | [
"MIT"
] | null | null | null | slimmingFile/test.py | FisherEat/python_ios_projects | 1d1bba34d31ff31b732b5ae573c0eca0a4a0fb7b | [
"MIT"
] | null | null | null | slimmingFile/test.py | FisherEat/python_ios_projects | 1d1bba34d31ff31b732b5ae573c0eca0a4a0fb7b | [
"MIT"
] | null | null | null | import sys
print(sys.getdefaultencoding()) | 21.5 | 31 | 0.813953 | 5 | 43 | 7 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.069767 | 43 | 2 | 31 | 21.5 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
d8a539159d132e8b058ec6b8b0edc3c1374f184c | 309 | py | Python | dagger_contrib/serializer/dask/dataframe/__init__.py | larribas/dagger-contrib | 1833614c82241a404b8e54c74052c5067b0ca104 | [
"Apache-2.0"
] | 1 | 2021-10-14T17:26:51.000Z | 2021-10-14T17:26:51.000Z | dagger_contrib/serializer/dask/dataframe/__init__.py | larribas/dagger-contrib | 1833614c82241a404b8e54c74052c5067b0ca104 | [
"Apache-2.0"
] | 3 | 2021-09-24T17:38:08.000Z | 2021-09-28T09:35:05.000Z | dagger_contrib/serializer/dask/dataframe/__init__.py | larribas/dagger-contrib | 1833614c82241a404b8e54c74052c5067b0ca104 | [
"Apache-2.0"
] | null | null | null | """Collection of serializers for Dask DataFrames (https://docs.dask.org/en/latest/generated/dask.dataframe.DataFrame.html#dask.dataframe.DataFrame)."""
from dagger_contrib.serializer.dask.dataframe.as_csv import AsCSV # noqa
from dagger_contrib.serializer.dask.dataframe.as_parquet import AsParquet # noqa
| 61.8 | 151 | 0.81877 | 42 | 309 | 5.928571 | 0.595238 | 0.208835 | 0.176707 | 0.216867 | 0.337349 | 0.337349 | 0.337349 | 0 | 0 | 0 | 0 | 0 | 0.071197 | 309 | 4 | 152 | 77.25 | 0.867596 | 0.504854 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
2b27ab9ccb8d0d39003f7755271f419e1610f6f1 | 40,457 | py | Python | src/healthcareapis/azext_healthcareapis/generated/_help.py | Caoxuyang/azure-cli-extensions | d2011261f29033cb31a1064256727d87049ab423 | [
"MIT"
] | null | null | null | src/healthcareapis/azext_healthcareapis/generated/_help.py | Caoxuyang/azure-cli-extensions | d2011261f29033cb31a1064256727d87049ab423 | [
"MIT"
] | null | null | null | src/healthcareapis/azext_healthcareapis/generated/_help.py | Caoxuyang/azure-cli-extensions | d2011261f29033cb31a1064256727d87049ab423 | [
"MIT"
] | 1 | 2022-02-14T21:43:29.000Z | 2022-02-14T21:43:29.000Z | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
from knack.help_files import helps
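# Each entry registered on knack's `helps` dict below is a YAML document that
# the CLI renders for the matching `az healthcareapis ...` --help page.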
helps['healthcareapis'] = '''
type: group
short-summary: Manage Healthcare Apis
'''
helps['healthcareapis service'] = """
type: group
short-summary: healthcareapis service
"""
helps['healthcareapis service list'] = """
type: command
short-summary: "Get all the service instances in a resource group. And Get all the service instances in a \
subscription."
examples:
- name: List all services in resource group
text: |-
az healthcareapis service list --resource-group "rgname"
- name: List all services in subscription
text: |-
az healthcareapis service list
"""
helps['healthcareapis service show'] = """
type: command
short-summary: "Get the metadata of a service instance."
examples:
- name: Get metadata
text: |-
az healthcareapis service show --resource-group "rg1" --resource-name "service1"
"""
helps['healthcareapis service create'] = """
type: command
short-summary: "Create the metadata of a service instance."
parameters:
- name: --access-policies
short-summary: "The access policies of the service instance."
long-summary: |
Usage: --access-policies object-id=XX
object-id: Required. An Azure AD object ID (User or Apps) that is allowed access to the FHIR service.
Multiple actions can be specified by using more than one --access-policies argument.
- name: --cosmos-db-configuration
short-summary: "The settings for the Cosmos DB database backing the service."
long-summary: |
Usage: --cosmos-db-configuration offer-throughput=XX key-vault-key-uri=XX
offer-throughput: The provisioned throughput for the backing database.
key-vault-key-uri: The URI of the customer-managed key for the backing database.
- name: --authentication-configuration -c
short-summary: "The authentication configuration for the service instance."
long-summary: |
Usage: --authentication-configuration authority=XX audience=XX smart-proxy-enabled=XX
authority: The authority url for the service
audience: The audience url for the service
smart-proxy-enabled: If the SMART on FHIR proxy is enabled
- name: --cors-configuration
short-summary: "The settings for the CORS configuration of the service instance."
long-summary: |
Usage: --cors-configuration origins=XX headers=XX methods=XX max-age=XX allow-credentials=XX
origins: The origins to be allowed via CORS.
headers: The headers to be allowed via CORS.
methods: The methods to be allowed via CORS.
max-age: The max age to be allowed via CORS.
allow-credentials: If credentials are allowed via CORS.
- name: --private-endpoint-connections
short-summary: "The list of private endpoint connections that are set up for this resource."
long-summary: |
Usage: --private-endpoint-connections status=XX description=XX actions-required=XX
status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
description: The reason for approval/rejection of the connection.
actions-required: A message indicating if changes on the service provider require any updates on the \
consumer.
Multiple actions can be specified by using more than one --private-endpoint-connections argument.
- name: --oci-artifacts
short-summary: "The list of Open Container Initiative (OCI) artifacts."
long-summary: |
Usage: --oci-artifacts login-server=XX image-name=XX digest=XX
login-server: The Azure Container Registry login server.
image-name: The artifact name.
digest: The artifact digest.
Multiple actions can be specified by using more than one --oci-artifacts argument.
examples:
- name: Create or Update a service with all parameters
text: |-
az healthcareapis service create --resource-group "rg1" --resource-name "service1" --identity-type \
"SystemAssigned" --kind "fhir-R4" --location "westus2" --access-policies object-id="c487e7d1-3210-41a3-8ccc-e9372b78da4\
7" --access-policies object-id="5b307da8-43d4-492b-8b66-b0294ade872f" --authentication-configuration \
audience="https://azurehealthcareapis.com" authority="https://login.microsoftonline.com/abfde7b2-df0f-47e6-aabf-2462b07\
508dc" smart-proxy-enabled=true --cors-configuration allow-credentials=false headers="*" max-age=1440 methods="DELETE" \
methods="GET" methods="OPTIONS" methods="PATCH" methods="POST" methods="PUT" origins="*" --cosmos-db-configuration \
key-vault-key-uri="https://my-vault.vault.azure.net/keys/my-key" offer-throughput=1000 --export-configuration-storage-a\
ccount-name "existingStorageAccount" --public-network-access "Disabled"
- name: Create or Update a service with minimum parameters
text: |-
az healthcareapis service create --resource-group "rg1" --resource-name "service2" --kind "fhir-R4" \
--location "westus2" --access-policies object-id="c487e7d1-3210-41a3-8ccc-e9372b78da47"
"""
helps['healthcareapis service update'] = """
type: command
short-summary: "Update the metadata of a service instance."
examples:
- name: Patch service
text: |-
az healthcareapis service update --resource-group "rg1" --resource-name "service1" --tags tag1="value1" \
tag2="value2"
"""
helps['healthcareapis service delete'] = """
type: command
short-summary: "Delete a service instance."
examples:
- name: Delete service
text: |-
az healthcareapis service delete --resource-group "rg1" --resource-name "service1"
"""
helps['healthcareapis service wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the healthcareapis service is met.
examples:
- name: Pause executing next line of CLI script until the healthcareapis service is successfully created.
text: |-
az healthcareapis service wait --resource-group "rg1" --resource-name "service1" --created
- name: Pause executing next line of CLI script until the healthcareapis service is successfully updated.
text: |-
az healthcareapis service wait --resource-group "rg1" --resource-name "service1" --updated
- name: Pause executing next line of CLI script until the healthcareapis service is successfully deleted.
text: |-
az healthcareapis service wait --resource-group "rg1" --resource-name "service1" --deleted
"""
helps['healthcareapis operation-result'] = """
type: group
short-summary: healthcareapis operation-result
"""
helps['healthcareapis operation-result show'] = """
type: command
short-summary: "Get the operation result for a long running operation."
examples:
- name: Get operation result
text: |-
az healthcareapis operation-result show --location-name "westus" --operation-result-id "exampleid"
"""
helps['healthcareapis private-endpoint-connection'] = """
type: group
short-summary: healthcareapis private-endpoint-connection
"""
helps['healthcareapis private-endpoint-connection list'] = """
type: command
short-summary: "Lists all private endpoint connections for a service."
examples:
- name: PrivateEndpointConnection_List
text: |-
az healthcareapis private-endpoint-connection list --resource-group "rgname" --resource-name "service1"
"""
helps['healthcareapis private-endpoint-connection show'] = """
type: command
short-summary: "Gets the specified private endpoint connection associated with the service."
examples:
- name: PrivateEndpointConnection_GetConnection
text: |-
az healthcareapis private-endpoint-connection show --name "myConnection" --resource-group "rgname" \
--resource-name "service1"
"""
helps['healthcareapis private-endpoint-connection create'] = """
type: command
short-summary: "Update the state of the specified private endpoint connection associated with the service."
parameters:
- name: --private-link-service-connection-state -s
short-summary: "A collection of information about the state of the connection between service consumer and \
provider."
long-summary: |
Usage: --private-link-service-connection-state status=XX description=XX actions-required=XX
status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
description: The reason for approval/rejection of the connection.
actions-required: A message indicating if changes on the service provider require any updates on the \
consumer.
examples:
- name: PrivateEndpointConnection_CreateOrUpdate
text: |-
az healthcareapis private-endpoint-connection create --name "myConnection" \
--private-link-service-connection-state description="Auto-Approved" status="Approved" --resource-group "rgname" \
--resource-name "service1"
"""
helps['healthcareapis private-endpoint-connection update'] = """
type: command
short-summary: "Update the state of the specified private endpoint connection associated with the service."
parameters:
- name: --private-link-service-connection-state -s
short-summary: "A collection of information about the state of the connection between service consumer and \
provider."
long-summary: |
Usage: --private-link-service-connection-state status=XX description=XX actions-required=XX
status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
description: The reason for approval/rejection of the connection.
actions-required: A message indicating if changes on the service provider require any updates on the \
consumer.
"""
helps['healthcareapis private-endpoint-connection delete'] = """
type: command
short-summary: "Deletes a private endpoint connection."
examples:
- name: PrivateEndpointConnections_Delete
text: |-
az healthcareapis private-endpoint-connection delete --name "myConnection" --resource-group "rgname" \
--resource-name "service1"
"""
helps['healthcareapis private-endpoint-connection wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the healthcareapis \
private-endpoint-connection is met.
examples:
- name: Pause executing next line of CLI script until the healthcareapis private-endpoint-connection is \
successfully created.
text: |-
az healthcareapis private-endpoint-connection wait --name "myConnection" --resource-group "rgname" \
--resource-name "service1" --created
- name: Pause executing next line of CLI script until the healthcareapis private-endpoint-connection is \
successfully updated.
text: |-
az healthcareapis private-endpoint-connection wait --name "myConnection" --resource-group "rgname" \
--resource-name "service1" --updated
- name: Pause executing next line of CLI script until the healthcareapis private-endpoint-connection is \
successfully deleted.
text: |-
az healthcareapis private-endpoint-connection wait --name "myConnection" --resource-group "rgname" \
--resource-name "service1" --deleted
"""
helps['healthcareapis private-link-resource'] = """
type: group
short-summary: healthcareapis private-link-resource
"""
helps['healthcareapis private-link-resource list'] = """
type: command
short-summary: "Gets the private link resources that need to be created for a service."
examples:
- name: PrivateLinkResources_ListGroupIds
text: |-
az healthcareapis private-link-resource list --resource-group "rgname" --resource-name "service1"
"""
helps['healthcareapis private-link-resource show'] = """
type: command
short-summary: "Gets a private link resource that need to be created for a service."
examples:
- name: PrivateLinkResources_Get
text: |-
az healthcareapis private-link-resource show --group-name "fhir" --resource-group "rgname" \
--resource-name "service1"
"""
helps['healthcareapis acr'] = """
type: group
short-summary: healthcareapis acr
"""
helps['healthcareapis acr list'] = """
type: command
short-summary: "Lists all container registries associated with the service."
examples:
- name: Acr_List
text: |-
az healthcareapis acr list --resource-group "rgname" --resource-name "service1"
"""
helps['healthcareapis acr add'] = """
type: command
short-summary: "Add a list of registries to the service, repeated ones will be ignored."
examples:
- name: Acr_Add
text: |-
az healthcareapis acr add --login-servers "test1.azurecr.io test2.azurecr.io test3.azurecr.io" --resource-group "rgname" \
--resource-name "service1"
"""
helps['healthcareapis acr remove'] = """
type: command
short-summary: "Remove a list of registries from the service, non-existing ones will be ignored."
examples:
- name: Acr_Remove
text: |-
az healthcareapis acr remove --login-servers "test1.azurecr.io test2.azurecr.io" --resource-group "rgname" \
--resource-name "service1"
"""
helps['healthcareapis acr reset'] = """
type: command
short-summary: "Reset the container registries associated with the service to a new list."
examples:
- name: Acr_Reset
text: |-
az healthcareapis acr reset --login-servers "test1.azurecr.io" --resource-group "rgname" \
--resource-name "service1"
- name: Acr_Reset_To_Empty
text: |-
az healthcareapis acr reset --resource-group "rgname" \
--resource-name "service1"
"""
helps['healthcareapis workspace'] = """
type: group
short-summary: Manage workspace with healthcareapis
"""
helps['healthcareapis workspace list'] = """
type: command
short-summary: "Lists all the available workspaces under the specified resource group. And Lists all the available \
workspaces under the specified subscription."
examples:
- name: Get workspaces by resource group
text: |-
az healthcareapis workspace list --resource-group "testRG"
- name: Get workspaces by subscription
text: |-
az healthcareapis workspace list
"""
helps['healthcareapis workspace show'] = """
type: command
short-summary: "Gets the properties of the specified workspace."
examples:
- name: Get workspace
text: |-
az healthcareapis workspace show --resource-group "testRG" --name "workspace1"
"""
helps['healthcareapis workspace create'] = """
type: command
short-summary: "Create a workspace resource with the specified parameters."
examples:
- name: Create or update a workspace
text: |-
az healthcareapis workspace create --resource-group "testRG" --location "westus" --name "workspace1"
"""
helps['healthcareapis workspace update'] = """
type: command
short-summary: "Patch workspace details."
examples:
- name: Update a workspace
text: |-
az healthcareapis workspace update --resource-group "testRG" --name "workspace1" --tags \
tagKey="tagValue"
"""
helps['healthcareapis workspace delete'] = """
type: command
short-summary: "Deletes a specified workspace."
examples:
- name: Delete a workspace
text: |-
az healthcareapis workspace delete --resource-group "testRG" --name "workspace1"
"""
helps['healthcareapis workspace wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the healthcareapis workspace is met.
examples:
- name: Pause executing next line of CLI script until the healthcareapis workspace is successfully created.
text: |-
az healthcareapis workspace wait --resource-group "testRG" --name "workspace1" --created
- name: Pause executing next line of CLI script until the healthcareapis workspace is successfully updated.
text: |-
az healthcareapis workspace wait --resource-group "testRG" --name "workspace1" --updated
- name: Pause executing next line of CLI script until the healthcareapis workspace is successfully deleted.
text: |-
az healthcareapis workspace wait --resource-group "testRG" --name "workspace1" --deleted
"""
helps['healthcareapis workspace dicom-service'] = """
type: group
short-summary: Manage dicom service with healthcareapis
"""
helps['healthcareapis workspace dicom-service list'] = """
type: command
short-summary: "Lists all DICOM Services for the given workspace."
examples:
- name: List dicomservices
text: |-
az healthcareapis workspace dicom-service list --resource-group "testRG" --workspace-name "workspace1"
"""
helps['healthcareapis workspace dicom-service show'] = """
type: command
short-summary: "Gets the properties of the specified DICOM Service."
examples:
- name: Get a dicomservice
text: |-
az healthcareapis workspace dicom-service show --name "blue" --resource-group "testRG" --workspace-name \
"workspace1"
"""
helps['healthcareapis workspace dicom-service create'] = """
type: command
short-summary: "Create a DICOM Service resource with the specified parameters."
examples:
- name: Create or update a Dicom Service
text: |-
az healthcareapis workspace dicom-service create --name "blue" --location "westus" --resource-group \
"testRG" --workspace-name "workspace1"
"""
helps['healthcareapis workspace dicom-service update'] = """
type: command
short-summary: "Patch DICOM Service details."
examples:
- name: Update a dicomservice
text: |-
az healthcareapis workspace dicom-service update --name "blue" --tags tagKey="tagValue" \
--resource-group "testRG" --workspace-name "workspace1"
"""
helps['healthcareapis workspace dicom-service delete'] = """
type: command
short-summary: "Deletes a DICOM Service."
examples:
- name: Delete a dicomservice
text: |-
az healthcareapis workspace dicom-service delete --name "blue" --resource-group "testRG" \
--workspace-name "workspace1"
"""
helps['healthcareapis workspace dicom-service wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the healthcareapis workspace dicom-service is \
met.
examples:
- name: Pause executing next line of CLI script until the healthcareapis workspace dicom-service is successfully \
created.
text: |-
az healthcareapis workspace dicom-service wait --name "blue" --resource-group "testRG" --workspace-name \
"workspace1" --created
- name: Pause executing next line of CLI script until the healthcareapis workspace dicom-service is successfully \
updated.
text: |-
az healthcareapis workspace dicom-service wait --name "blue" --resource-group "testRG" --workspace-name \
"workspace1" --updated
- name: Pause executing next line of CLI script until the healthcareapis workspace dicom-service is successfully \
deleted.
text: |-
az healthcareapis workspace dicom-service wait --name "blue" --resource-group "testRG" --workspace-name \
"workspace1" --deleted
"""
helps['healthcareapis workspace iot-connector'] = """
type: group
short-summary: Manage iot connector with healthcareapis
"""
helps['healthcareapis workspace iot-connector list'] = """
type: command
short-summary: "Lists all IoT Connectors for the given workspace."
examples:
- name: List iotconnectors
text: |-
az healthcareapis workspace iot-connector list --resource-group "testRG" --workspace-name "workspace1"
"""
helps['healthcareapis workspace iot-connector show'] = """
type: command
short-summary: "Gets the properties of the specified IoT Connector."
examples:
- name: Get an IoT Connector
text: |-
az healthcareapis workspace iot-connector show --name "blue" --resource-group "testRG" --workspace-name \
"workspace1"
"""
helps['healthcareapis workspace iot-connector create'] = """
type: command
short-summary: "Create an IoT Connector resource with the specified parameters."
parameters:
- name: --ingestion-endpoint-configuration -c
short-summary: "Source configuration."
long-summary: |
Usage: --ingestion-endpoint-configuration event-hub-name=XX consumer-group=XX \
fully-qualified-event-hub-namespace=XX
event-hub-name: Event Hub name to connect to.
consumer-group: Consumer group of the event hub to connected to.
fully-qualified-event-hub-namespace: Fully qualified namespace of the Event Hub to connect to.
examples:
- name: Create an IoT Connector
text: |-
az healthcareapis workspace iot-connector create --identity-type "SystemAssigned" --location "westus" --content \
"{\\"template\\":[{\\"template\\":{\\"deviceIdExpression\\":\\"$.deviceid\\",\\"timestampExpression\\":\\"$.measurement\
datetime\\",\\"typeMatchExpression\\":\\"$..[?(@heartrate)]\\",\\"typeName\\":\\"heartrate\\",\\"values\\":[{\\"require\
d\\":\\"true\\",\\"valueExpression\\":\\"$.heartrate\\",\\"valueName\\":\\"hr\\"}]},\\"templateType\\":\\"JsonPathConte\
nt\\"}],\\"templateType\\":\\"CollectionContent\\"}" --ingestion-endpoint-configuration consumer-group="ConsumerGroupA"\
event-hub-name="MyEventHubName" fully-qualified-event-hub-namespace="myeventhub.servicesbus.windows.net" --tags \
additionalProp1="string" additionalProp2="string" additionalProp3="string" --name "blue" --resource-group "testRG" \
--workspace-name "workspace1"
"""
helps['healthcareapis workspace iot-connector update'] = """
type: command
short-summary: "Patch an IoT Connector."
examples:
- name: Patch an IoT Connector
text: |-
az healthcareapis workspace iot-connector update --name "blue" --identity-type "SystemAssigned" --tags \
additionalProp1="string" additionalProp2="string" additionalProp3="string" --resource-group "testRG" --workspace-name \
"workspace1"
"""
helps['healthcareapis workspace iot-connector delete'] = """
type: command
short-summary: "Deletes an IoT Connector."
examples:
- name: Delete an IoT Connector
text: |-
az healthcareapis workspace iot-connector delete --name "blue" --resource-group "testRG" \
--workspace-name "workspace1"
"""
helps['healthcareapis workspace iot-connector wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the healthcareapis workspace iot-connector is \
met.
examples:
- name: Pause executing next line of CLI script until the healthcareapis workspace iot-connector is successfully \
created.
text: |-
az healthcareapis workspace iot-connector wait --name "blue" --resource-group "testRG" --workspace-name \
"workspace1" --created
- name: Pause executing next line of CLI script until the healthcareapis workspace iot-connector is successfully \
updated.
text: |-
az healthcareapis workspace iot-connector wait --name "blue" --resource-group "testRG" --workspace-name \
"workspace1" --updated
- name: Pause executing next line of CLI script until the healthcareapis workspace iot-connector is successfully \
deleted.
text: |-
az healthcareapis workspace iot-connector wait --name "blue" --resource-group "testRG" --workspace-name \
"workspace1" --deleted
"""
helps['healthcareapis workspace iot-connector fhir-destination'] = """
type: group
short-summary: Manage iot connector fhir destination with healthcareapis
"""
helps['healthcareapis workspace iot-connector fhir-destination list'] = """
type: command
short-summary: "Lists all FHIR destinations for the given IoT Connector."
examples:
- name: List IoT Connectors
text: |-
az healthcareapis workspace iot-connector fhir-destination list --iot-connector-name "blue" \
--resource-group "testRG" --workspace-name "workspace1"
"""
helps['healthcareapis workspace iot-connector fhir-destination show'] = """
type: command
short-summary: "Gets the properties of the specified Iot Connector FHIR destination."
examples:
- name: Get an IoT Connector destination
text: |-
az healthcareapis workspace iot-connector fhir-destination show --fhir-destination-name "dest1" \
--iot-connector-name "blue" --resource-group "testRG" --workspace-name "workspace1"
"""
helps['healthcareapis workspace iot-connector fhir-destination create'] = """
type: command
short-summary: "Create an IoT Connector FHIR destination resource with the specified parameters."
examples:
- name: Create or update an Iot Connector FHIR destination
text: |-
az healthcareapis workspace iot-connector fhir-destination create --fhir-destination-name "dest1" \
--iot-connector-name "blue" --location "westus" --content "{\\"template\\":[{\\"template\\":{\\"codes\\":[{\\"code\\":\
\\"8867-4\\",\\"display\\":\\"Heart rate\\",\\"system\\":\\"http://loinc.org\\"}],\\"periodInterval\\":60,\\"typeName\\\
":\\"heartrate\\",\\"value\\":{\\"defaultPeriod\\":5000,\\"unit\\":\\"count/min\\",\\"valueName\\":\\"hr\\",\\"valueTyp\
e\\":\\"SampledData\\"}},\\"templateType\\":\\"CodeValueFhir\\"}],\\"templateType\\":\\"CollectionFhirTemplate\\"}" \
--fhir-service-resource-id "subscriptions/11111111-2222-3333-4444-555566667777/resourceGroups/myrg/providers/Microsoft.\
HealthcareApis/workspaces/myworkspace/fhirservices/myfhirservice" --resource-identity-resolution-type "Create" \
--resource-group "testRG" --workspace-name "workspace1"
"""
helps['healthcareapis workspace iot-connector fhir-destination update'] = """
type: command
short-summary: "Update an IoT Connector FHIR destination resource with the specified parameters."
"""
helps['healthcareapis workspace iot-connector fhir-destination delete'] = """
type: command
short-summary: "Deletes an IoT Connector FHIR destination."
examples:
- name: Delete an IoT Connector destination
text: |-
az healthcareapis workspace iot-connector fhir-destination delete --fhir-destination-name "dest1" \
--iot-connector-name "blue" --resource-group "testRG" --workspace-name "workspace1"
"""
helps['healthcareapis workspace iot-connector fhir-destination wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the healthcareapis workspace iot-connector \
fhir-destination is met.
examples:
- name: Pause executing next line of CLI script until the healthcareapis workspace iot-connector \
fhir-destination is successfully created.
text: |-
az healthcareapis workspace iot-connector fhir-destination wait --fhir-destination-name "dest1" \
--iot-connector-name "blue" --resource-group "testRG" --workspace-name "workspace1" --created
- name: Pause executing next line of CLI script until the healthcareapis workspace iot-connector \
fhir-destination is successfully updated.
text: |-
az healthcareapis workspace iot-connector fhir-destination wait --fhir-destination-name "dest1" \
--iot-connector-name "blue" --resource-group "testRG" --workspace-name "workspace1" --updated
- name: Pause executing next line of CLI script until the healthcareapis workspace iot-connector \
fhir-destination is successfully deleted.
text: |-
az healthcareapis workspace iot-connector fhir-destination wait --fhir-destination-name "dest1" \
--iot-connector-name "blue" --resource-group "testRG" --workspace-name "workspace1" --deleted
"""
helps['healthcareapis workspace fhir-service'] = """
type: group
short-summary: Manage fhir service with healthcareapis
"""
helps['healthcareapis workspace fhir-service list'] = """
type: command
short-summary: "Lists all FHIR Services for the given workspace."
examples:
- name: List fhirservices
text: |-
az healthcareapis workspace fhir-service list --resource-group "testRG" --workspace-name "workspace1"
"""
helps['healthcareapis workspace fhir-service show'] = """
type: command
short-summary: "Gets the properties of the specified FHIR Service."
examples:
- name: Get a Fhir Service
text: |-
az healthcareapis workspace fhir-service show --name "fhirservices1" --resource-group "testRG" \
--workspace-name "workspace1"
"""
helps['healthcareapis workspace fhir-service create'] = """
type: command
short-summary: "Create a FHIR Service resource with the specified parameters."
parameters:
- name: --access-policies
short-summary: "Fhir Service access policies."
long-summary: |
Usage: --access-policies object-id=XX
object-id: Required. An Azure AD object ID (User or Apps) that is allowed access to the FHIR service.
Multiple actions can be specified by using more than one --access-policies argument.
- name: --authentication-configuration -c
short-summary: "Fhir Service authentication configuration."
long-summary: |
Usage: --authentication-configuration authority=XX audience=XX smart-proxy-enabled=XX
authority: The authority url for the service
audience: The audience url for the service
smart-proxy-enabled: If the SMART on FHIR proxy is enabled
- name: --cors-configuration
short-summary: "Fhir Service Cors configuration."
long-summary: |
Usage: --cors-configuration origins=XX headers=XX methods=XX max-age=XX allow-credentials=XX
origins: The origins to be allowed via CORS.
headers: The headers to be allowed via CORS.
methods: The methods to be allowed via CORS.
max-age: The max age to be allowed via CORS.
allow-credentials: If credentials are allowed via CORS.
- name: --oci-artifacts
short-summary: "The list of Open Container Initiative (OCI) artifacts."
long-summary: |
Usage: --oci-artifacts login-server=XX image-name=XX digest=XX
login-server: The Azure Container Registry login server.
image-name: The artifact name.
digest: The artifact digest.
Multiple actions can be specified by using more than one --oci-artifacts argument.
examples:
- name: Create or update a Fhir Service
text: |-
az healthcareapis workspace fhir-service create --name "fhirservice1" --identity-type "SystemAssigned" --kind \
"fhir-R4" --location "westus" --access-policies object-id="c487e7d1-3210-41a3-8ccc-e9372b78da47" --access-policies \
object-id="5b307da8-43d4-492b-8b66-b0294ade872f" --login-servers "test1.azurecr.io" --authentication-configuration \
audience="https://azurehealthcareapis.com" authority="https://login.microsoftonline.com/abfde7b2-df0f-47e6-aabf-2462b07\
508dc" smart-proxy-enabled=true --cors-configuration allow-credentials=false headers="*" max-age=1440 methods="DELETE" \
methods="GET" methods="OPTIONS" methods="PATCH" methods="POST" methods="PUT" origins="*" --export-configuration-storage-account-name \
"existingStorageAccount" --tags additionalProp1="string" additionalProp2="string" additionalProp3="string" \
--resource-group "testRG" --workspace-name "workspace1"
"""
helps['healthcareapis workspace fhir-service update'] = """
type: command
short-summary: "Patch FHIR Service details."
examples:
- name: Update a Fhir Service
text: |-
az healthcareapis workspace fhir-service update --name "fhirservice1" --tags tagKey="tagValue" \
--resource-group "testRG" --workspace-name "workspace1"
"""
helps['healthcareapis workspace fhir-service delete'] = """
type: command
short-summary: "Deletes a FHIR Service."
examples:
- name: Delete a Fhir Service
text: |-
az healthcareapis workspace fhir-service delete --name "fhirservice1" --resource-group "testRG" \
--workspace-name "workspace1"
"""
helps['healthcareapis workspace fhir-service wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the healthcareapis workspace fhir-service is \
met.
examples:
- name: Pause executing next line of CLI script until the healthcareapis workspace fhir-service is successfully \
created.
text: |-
az healthcareapis workspace fhir-service wait --name "fhirservices1" --resource-group "testRG" \
--workspace-name "workspace1" --created
- name: Pause executing next line of CLI script until the healthcareapis workspace fhir-service is successfully \
updated.
text: |-
az healthcareapis workspace fhir-service wait --name "fhirservices1" --resource-group "testRG" \
--workspace-name "workspace1" --updated
- name: Pause executing next line of CLI script until the healthcareapis workspace fhir-service is successfully \
deleted.
text: |-
az healthcareapis workspace fhir-service wait --name "fhirservices1" --resource-group "testRG" \
--workspace-name "workspace1" --deleted
"""
helps['healthcareapis workspace private-endpoint-connection'] = """
type: group
short-summary: Manage workspace private endpoint connection with healthcareapis
"""
helps['healthcareapis workspace private-endpoint-connection list'] = """
type: command
short-summary: "Lists all private endpoint connections for a workspace."
examples:
- name: WorkspacePrivateEndpointConnection_List
text: |-
az healthcareapis workspace private-endpoint-connection list --resource-group "testRG" --workspace-name \
"workspace1"
"""
helps['healthcareapis workspace private-endpoint-connection show'] = """
type: command
short-summary: "Gets the specified private endpoint connection associated with the workspace."
examples:
- name: WorkspacePrivateEndpointConnection_GetConnection
text: |-
az healthcareapis workspace private-endpoint-connection show --private-endpoint-connection-name \
"myConnection" --resource-group "testRG" --workspace-name "workspace1"
"""
helps['healthcareapis workspace private-endpoint-connection create'] = """
type: command
short-summary: "Update the state of the specified private endpoint connection associated with the workspace."
parameters:
- name: --private-link-service-connection-state -s
short-summary: "A collection of information about the state of the connection between service consumer and \
provider."
long-summary: |
Usage: --private-link-service-connection-state status=XX description=XX actions-required=XX
status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
description: The reason for approval/rejection of the connection.
actions-required: A message indicating if changes on the service provider require any updates on the \
consumer.
examples:
- name: WorkspacePrivateEndpointConnection_CreateOrUpdate
text: |-
az healthcareapis workspace private-endpoint-connection create --private-endpoint-connection-name \
"myConnection" --private-link-service-connection-state description="Auto-Approved" status="Approved" --resource-group \
"testRG" --workspace-name "workspace1"
"""
helps['healthcareapis workspace private-endpoint-connection update'] = """
type: command
short-summary: "Update the state of the specified private endpoint connection associated with the workspace."
parameters:
- name: --private-link-service-connection-state -s
short-summary: "A collection of information about the state of the connection between service consumer and \
provider."
long-summary: |
Usage: --private-link-service-connection-state status=XX description=XX actions-required=XX
status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
description: The reason for approval/rejection of the connection.
actions-required: A message indicating if changes on the service provider require any updates on the \
consumer.
"""
helps['healthcareapis workspace private-endpoint-connection delete'] = """
type: command
short-summary: "Deletes a private endpoint connection."
examples:
- name: WorkspacePrivateEndpointConnections_Delete
text: |-
az healthcareapis workspace private-endpoint-connection delete --private-endpoint-connection-name \
"myConnection" --resource-group "testRG" --workspace-name "workspace1"
"""
helps['healthcareapis workspace private-endpoint-connection wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the healthcareapis workspace \
private-endpoint-connection is met.
examples:
- name: Pause executing next line of CLI script until the healthcareapis workspace private-endpoint-connection \
is successfully created.
text: |-
az healthcareapis workspace private-endpoint-connection wait --private-endpoint-connection-name \
"myConnection" --resource-group "testRG" --workspace-name "workspace1" --created
- name: Pause executing next line of CLI script until the healthcareapis workspace private-endpoint-connection \
is successfully updated.
text: |-
az healthcareapis workspace private-endpoint-connection wait --private-endpoint-connection-name \
"myConnection" --resource-group "testRG" --workspace-name "workspace1" --updated
- name: Pause executing next line of CLI script until the healthcareapis workspace private-endpoint-connection \
is successfully deleted.
text: |-
az healthcareapis workspace private-endpoint-connection wait --private-endpoint-connection-name \
"myConnection" --resource-group "testRG" --workspace-name "workspace1" --deleted
"""
helps['healthcareapis workspace private-link-resource'] = """
type: group
short-summary: Manage workspace private link resource with healthcareapis
"""
helps['healthcareapis workspace private-link-resource list'] = """
type: command
short-summary: "Gets the private link resources that need to be created for a workspace."
examples:
- name: WorkspacePrivateLinkResources_ListGroupIds
text: |-
az healthcareapis workspace private-link-resource list --resource-group "testRG" --workspace-name \
"workspace1"
"""
helps['healthcareapis workspace private-link-resource show'] = """
type: command
short-summary: "Gets a private link resource that need to be created for a workspace."
examples:
- name: WorkspacePrivateLinkResources_Get
text: |-
az healthcareapis workspace private-link-resource show --group-name "healthcareworkspace" \
--resource-group "testRG" --workspace-name "workspace1"
"""
| 45.355381 | 137 | 0.689918 | 4,496 | 40,457 | 6.203737 | 0.086966 | 0.097304 | 0.053062 | 0.047003 | 0.854654 | 0.809659 | 0.748602 | 0.682705 | 0.640686 | 0.601534 | 0 | 0.009437 | 0.201102 | 40,457 | 891 | 138 | 45.406285 | 0.853532 | 0.011617 | 0 | 0.59264 | 0 | 0.102792 | 0.963778 | 0.125425 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.001269 | 0 | 0.001269 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
2b57ee0743336edd24a941102691b8cf39c7da7f | 105 | py | Python | python/testData/inspections/PyRelativeImportInspection/PlainDirectoryDottedImportFromTwoElementsWithAs/plainDirectory/script.py | Tasemo/intellij-community | 50aeaf729b7073e91c7c77487a1f155e0dfe3fcd | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/inspections/PyRelativeImportInspection/PlainDirectoryDottedImportFromTwoElementsWithAs/plainDirectory/script.py | Tasemo/intellij-community | 50aeaf729b7073e91c7c77487a1f155e0dfe3fcd | [
"Apache-2.0"
] | null | null | null | python/testData/inspections/PyRelativeImportInspection/PlainDirectoryDottedImportFromTwoElementsWithAs/plainDirectory/script.py | Tasemo/intellij-community | 50aeaf729b7073e91c7c77487a1f155e0dfe3fcd | [
"Apache-2.0"
] | null | null | null | <weak_warning descr="Relative import outside of a package">from .util import foo, bar as b</weak_warning> | 105 | 105 | 0.790476 | 18 | 105 | 4.5 | 0.833333 | 0.271605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.114286 | 105 | 1 | 105 | 105 | 0.870968 | 0 | 0 | 0 | 0 | 0 | 0.339623 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 1 | null | null | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
2b6b50202e01bfc57506ff7262777817e3b4c1f0 | 24 | py | Python | cases/test_case.py | r0x0d/dismod | b6f8473861cf7dcdff32f844b58ee288996eb99a | [
"MIT"
] | null | null | null | cases/test_case.py | r0x0d/dismod | b6f8473861cf7dcdff32f844b58ee288996eb99a | [
"MIT"
] | 15 | 2022-03-11T00:24:57.000Z | 2022-03-21T23:51:52.000Z | cases/test_case.py | r0x0d/dismod | b6f8473861cf7dcdff32f844b58ee288996eb99a | [
"MIT"
] | null | null | null | from . import test_case
| 12 | 23 | 0.791667 | 4 | 24 | 4.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 24 | 1 | 24 | 24 | 0.9 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
9929df953a89372ddeb9626936bebf1a9b041dc4 | 12,230 | py | Python | src/policies/rule.py | ZhiyuanYaoJ/SimLB | fee007cb7d387c9c9061740e744f32563d59766f | [
"Apache-2.0"
] | 8 | 2022-02-10T18:51:43.000Z | 2022-03-18T09:15:53.000Z | src/policies/rule.py | ZhiyuanYaoJ/SimLB | fee007cb7d387c9c9061740e744f32563d59766f | [
"Apache-2.0"
] | null | null | null | src/policies/rule.py | ZhiyuanYaoJ/SimLB | fee007cb7d387c9c9061740e744f32563d59766f | [
"Apache-2.0"
] | null | null | null | import random
import time
from config.global_conf import ACTION_DIM, RENDER, LB_PERIOD, B_OFFSET, RENDER_RECEIVE, HEURISTIC_ALPHA
from common.entities import NodeLB
import numpy as np
class NodeLBLSQ(NodeLB):
def __init__(self, id, child_ids, bucket_size=65536, weights=None, max_n_child=ACTION_DIM, T0=time.time(), reward_option=2, ecmp=False, child_prefix='as', po2=False, debug=0):
super().__init__(id, child_ids, bucket_size, weights, max_n_child, T0, reward_option, ecmp, child_prefix, debug)
self.po2 = po2 # power-of-2-choices
def choose_child(self, flow, nodes=None, ts=None):
# we still need to generate a bucket id to store the flow
bucket_id, _ = self._ecmp(*flow.fields, self._bucket_table, self._bucket_mask)
n_flow_on = self._counters['n_flow_on']
if self.debug > 1:
print("@nodeLBLSQ {} - n_flow_on: {}".format(self.id, n_flow_on))
# assert len(set(self.child_ids)) == len(self.child_ids)
if self.po2:
n_flow_on_2 = {i: n_flow_on[i] for i in random.sample(self.child_ids, 2)}
            child_id = min(n_flow_on_2, key=n_flow_on_2.get)
if self.debug > 1:
print("n_flow_on chosen {} out of -".format(child_id), n_flow_on_2)
else:
min_n_flow = n_flow_on[self.child_ids].min()
n_flow_map = zip(self.child_ids, n_flow_on[self.child_ids])
min_ids = [k for k, v in n_flow_map if v == min_n_flow]
child_id = random.choice(min_ids)
n_flow_map = zip(self.child_ids, n_flow_on[self.child_ids])
if self.debug > 1:
print("n_flow_on chosen minimum {} from {}".format(child_id, '|'.join(['{}: {}'.format(k,v) for k, v in n_flow_map])))
del n_flow_map
return child_id, bucket_id
class NodeLBSED(NodeLB):
'''
@brief:
        Shortest Expected Delay (SED) assigns a server based on (queue_len + b_offset) / weight; classically b_offset = 1.
'''
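    # Worked sketch (illustrative numbers, not from the source): with b_offset=1,
    # queue lengths [3, 5] and weights [1.0, 2.0] give SED scores
    # [(3+1)/1.0, (5+1)/2.0] = [4.0, 3.0], so the second, faster server is
    # preferred despite its longer queue.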
def __init__(self, id, child_ids, bucket_size=65536, weights=None, max_n_child=ACTION_DIM, T0=time.time(), reward_option=2, ecmp=False, child_prefix='as', po2=False, b_offset=B_OFFSET, debug=0):
super().__init__(id, child_ids, bucket_size, weights,
max_n_child, T0, reward_option, ecmp, child_prefix, debug)
self.po2 = po2 # power-of-2-choices
self.b_offset = b_offset
    def choose_child(self, flow, nodes=None, ts=None):
# we still need to generate a bucket id to store the flow
bucket_id, _ = self._ecmp(
*flow.fields, self._bucket_table, self._bucket_mask)
n_flow_on = self._counters['n_flow_on']
if self.debug > 1:
print("@nodeLBLSQ {} - n_flow_on: {}".format(self.id, n_flow_on))
# assert len(set(self.child_ids)) == len(self.child_ids)
if self.po2:
n_flow_on_2 = {i: (self.b_offset+n_flow_on[i])/self.weights[i]
for i in random.sample(self.child_ids, 2)}
child_id = min(n_flow_on_2, key=n_flow_on_2.get)
if self.debug > 1:
print("n_flow_on chosen {} out of -".format(child_id), n_flow_on_2)
else:
score = [(self.b_offset+n_flow_on[i])/self.weights[i]
for i in self.child_ids]
min_n_flow = min(score)
n_flow_map = zip(self.child_ids, score)
min_ids = [k for k, v in n_flow_map if v == min_n_flow]
child_id = random.choice(min_ids)
if self.debug > 1:
n_flow_map = zip(self.child_ids, score)
print("score chosen minimum {} from {}".format(
child_id, '|'.join(['{}: {}'.format(k, v) for k, v in n_flow_map])))
del n_flow_map
return child_id, bucket_id
class NodeLBSRT(NodeLB):
'''
@brief:
Shortest remaining time (SRT) assigns AS based on sum(cpu_processing_time)/#cpu + sum(io_processing_time)/#io
'''
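    # Illustrative example (assumed numbers): remaining times t_rest_all = [0.8, 0.3]
    # make the second server the unique minimum, so it receives the flow; exact
    # ties are broken uniformly at random in choose_child() below.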
def __init__(self, id, child_ids, bucket_size=65536, weights=None, max_n_child=ACTION_DIM, T0=time.time(), reward_option=2, ecmp=False, child_prefix='as', po2=False, debug=0):
super().__init__(id, child_ids, bucket_size, weights,
max_n_child, T0, reward_option, ecmp, child_prefix, debug)
self.po2 = po2
def choose_child(self, flow, t_rest_all):
# we still need to generate a bucket id to store the flow
bucket_id, _ = self._ecmp(
*flow.fields, self._bucket_table, self._bucket_mask)
n_flow_on = self._counters['n_flow_on']
if self.debug > 1:
print("@nodeLBOracle {} - n_flow_on: {}".format(self.id, n_flow_on))
# assert len(set(self.child_ids)) == len(self.child_ids)
t_rest_map = zip(self.child_ids, t_rest_all)
if self.po2:
t_rest_2 = {i: t_rest_all[i] for i in random.sample(self.child_ids, 2)}
child_id = min(t_rest_2, key=t_rest_2.get)
if self.debug > 1:
print("n_flow_on chosen {} out of -".format(child_id), t_rest_2)
else:
min_t_rest = min(t_rest_all)
min_ids = [k for k, v in t_rest_map if v == min_t_rest]
child_id = random.choice(min_ids)
if self.debug > 1:
print("t_rest chosen minimum {} from {}".format(
child_id, '|'.join(['{}: {}'.format(k, v) for k, v in t_rest_map])))
del t_rest_map
return child_id, bucket_id
def receive(self, ts, flow, nodes):
'''
@brief:
data plane implementation
'''
assert flow.nexthop == self.id
flow.update_receive(ts, self.id)
        # select the target based on the actual remaining processing time of each server
t_rest_all = [nodes['{}{:d}'.format(self.child_prefix, i)].get_t_rest_total(ts)
for i in self.child_ids]
child_id, bucket_id = self.choose_child(flow, t_rest_all)
# flow = self.evaluate_decision_ground_truth(nodes, child_id, flow)
if RENDER_RECEIVE:
self.render_receive(ts, flow, child_id, nodes)
# bucket is available, register flow
if self._bucket_table_avail[bucket_id]:
            # register t_receive and chosen AS id
self._tracked_flows[flow.id] = (ts, bucket_id, child_id)
self._counters['n_flow_on'][child_id] += 1
if self.debug > 1:
print(
"bucket {} available, tracking flow {} -> node {}".format(bucket_id, flow.id, child_id))
print('n_flow_on becomes',
self._counters['n_flow_on'][self.child_ids])
else:
if self.debug > 1:
print("bucket is not available, making flow untracked")
self.n_untracked_flow += 1
ts += self.get_process_delay() # add process delay
# for now, we only implement for ecmp_random
flow.update_send(ts, '{}{}'.format(self.child_prefix, child_id))
self.send(ts+self.get_t2neighbour(), flow)
nodes['{}{}'.format(self.child_prefix, child_id)
].update_pending_fct(flow)
class NodeLBGSQ(NodeLB):
'''
@brief:
select AS based on global shortest queue
'''
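    # Illustrative example (assumed numbers): global queue lengths qlen_all = [4, 2, 2]
    # have minimum 2, shared by two servers, so choose_child() picks one of the
    # two uniformly at random.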
def __init__(self, id, child_ids, bucket_size=65536, weights=None, max_n_child=ACTION_DIM, T0=time.time(), reward_option=2, ecmp=False, child_prefix='as', po2=False, debug=0):
super().__init__(id, child_ids, bucket_size, weights,
max_n_child, T0, reward_option, ecmp, child_prefix, debug)
self.po2 = po2
def choose_child(self, flow, qlen_all):
# we still need to generate a bucket id to store the flow
bucket_id, _ = self._ecmp(
*flow.fields, self._bucket_table, self._bucket_mask)
if self.debug > 1:
print("@nodeLBOracle {} - n_flow_on: {}".format(self.id, n_flow_on))
if self.po2:
n_flow_on_2 = {v: qlen_all[i]
for i, v in random.sample(list(enumerate(self.child_ids)), 2)}
child_id = min(n_flow_on_2, key=n_flow_on_2.get)
if self.debug > 1:
print("n_flow_on chosen {} out of -".format(child_id), n_flow_on_2)
else:
n_flow_map = zip(self.child_ids, qlen_all)
min_n_flow = min(qlen_all)
min_ids = [k for k, v in n_flow_map if v == min_n_flow]
child_id = random.choice(min_ids)
if self.debug > 1:
print("n_flow_on chosen minimum {} from {}".format(
child_id, '|'.join(['{}: {}'.format(k, v) for k, v in n_flow_map])))
del n_flow_map
return child_id, bucket_id
def receive(self, ts, flow, nodes):
'''
@brief:
data plane implementation
'''
assert flow.nexthop == self.id
flow.update_receive(ts, self.id)
        # select the target based on the actual queue length of each server
qlen_all = [nodes['{}{:d}'.format(self.child_prefix, i)].get_n_flow_on()
for i in self.child_ids]
child_id, bucket_id = self.choose_child(flow, qlen_all)
# flow = self.evaluate_decision_ground_truth(nodes, child_id, flow)
if RENDER_RECEIVE:
self.render_receive(ts, flow, child_id, nodes)
# bucket is available, register flow
if self._bucket_table_avail[bucket_id]:
            # register t_receive and chosen AS id
self._tracked_flows[flow.id] = (ts, bucket_id, child_id)
self._counters['n_flow_on'][child_id] += 1
if self.debug > 1:
print(
"bucket {} available, tracking flow {} -> node {}".format(bucket_id, flow.id, child_id))
print('n_flow_on becomes',
self._counters['n_flow_on'][self.child_ids])
else:
if self.debug > 1:
print("bucket is not available, making flow untracked")
self.n_untracked_flow += 1
ts += self.get_process_delay() # add process delay
# for now, we only implement for ecmp_random
flow.update_send(ts, '{}{}'.format(self.child_prefix, child_id))
self.send(ts+self.get_t2neighbour(), flow)
nodes['{}{}'.format(self.child_prefix, child_id)
].update_pending_fct(flow)
class NodeLBActive(NodeLB):
def __init__(self, id, child_ids, bucket_size=65536, weights=None, max_n_child=ACTION_DIM, T0=time.time(), reward_option=2, ecmp=False, child_prefix='as', lb_period=LB_PERIOD, rtt_min=0.05, rtt_max=0.2, debug=0):
super().__init__(id, child_ids, bucket_size, weights,
max_n_child, T0, reward_option, ecmp, child_prefix, debug, lb_period)
self.alpha = HEURISTIC_ALPHA
self.rtt_min = rtt_min
self.rtt_max = rtt_max
self.lb_period = lb_period
assert 0 < self.alpha <= 1
def get_process_delay(self):
return random.uniform(self.rtt_min, self.rtt_max)
def step(self, ts, nodes=None):
'''
@brief:
calculate weights based on latest observation (number of on-going)
'''
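        # Weight-update sketch (illustrative numbers, not from the source):
        # qlen_all = [2, 5, 3] gives new_weights = max(qlen_all) - qlen_all = [3, 0, 2];
        # with alpha = 0.5 the applied weights are the EWMA
        # 0.5 * new_weights + 0.5 * old_weights, which damps oscillations.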
# step 1: prediction
qlen_all = np.array([nodes['{}{:d}'.format(self.child_prefix, i)].get_n_flow_on()
for i in self.child_ids])
new_weights = np.zeros(self.max_n_child)
new_weights[self.child_ids] = max(qlen_all) - qlen_all
if self.debug > 1:
print(">> ({:.3f}s) in {}: origin weights {} - new weights {}".format(
ts, self.__class__, self.weights[self.child_ids], new_weights[self.child_ids]))
# step 2: apply weights
self.weights = self.alpha*new_weights+(1-self.alpha)*self.weights
if self.debug > 1:
print(">> ({:.3f}s) in {}: updated weights {}".format(
ts, self.__class__, self.weights[self.child_ids]))
if RENDER:
self.render(ts, nodes)
ts += self.get_process_delay()
self.register_event(ts, 'lb_update_bucket', {'node_id': self.id})
self.register_event(ts + self.lb_period,
'lb_step', {'node_id': self.id})
| 44.311594 | 216 | 0.593295 | 1,755 | 12,230 | 3.833618 | 0.100855 | 0.049049 | 0.04786 | 0.032105 | 0.822235 | 0.80217 | 0.799197 | 0.787604 | 0.769768 | 0.764269 | 0 | 0.01376 | 0.286917 | 12,230 | 275 | 217 | 44.472727 | 0.757711 | 0.108994 | 0 | 0.65445 | 0 | 0 | 0.079541 | 0 | 0 | 0 | 0 | 0 | 0.015707 | 1 | 0.068063 | false | 0 | 0.026178 | 0.005236 | 0.146597 | 0.104712 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
99a62d8028a7a6a573b8bb43bf70c6cd9012699f | 32 | py | Python | mybayes/__init__.py | dungvtdev/upsbayescpm | f6ee877c689046d3c57a2ac06742cfe4a0b6550e | [
"MIT"
] | null | null | null | mybayes/__init__.py | dungvtdev/upsbayescpm | f6ee877c689046d3c57a2ac06742cfe4a0b6550e | [
"MIT"
] | null | null | null | mybayes/__init__.py | dungvtdev/upsbayescpm | f6ee877c689046d3c57a2ac06742cfe4a0b6550e | [
"MIT"
] | null | null | null | from ._mybayes_wrapper import *
| 16 | 31 | 0.8125 | 4 | 32 | 6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 32 | 1 | 32 | 32 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
99aa772b7337e4504119957618b28fecb4bbc834 | 30 | py | Python | telepict/ws/__init__.py | dpitch40/telepict | c0afcf0f726d8ac106dc8372cac2c003961e2327 | [
"MIT"
] | 1 | 2020-06-28T18:50:24.000Z | 2020-06-28T18:50:24.000Z | telepict/ws/__init__.py | dpitch40/telepict | c0afcf0f726d8ac106dc8372cac2c003961e2327 | [
"MIT"
] | 53 | 2020-07-04T01:21:24.000Z | 2021-08-29T23:16:21.000Z | telepict/ws/__init__.py | dpitch40/telepict | c0afcf0f726d8ac106dc8372cac2c003961e2327 | [
"MIT"
] | 1 | 2020-07-04T01:32:40.000Z | 2020-07-04T01:32:40.000Z | from .game import GameHandler
| 15 | 29 | 0.833333 | 4 | 30 | 6.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.133333 | 30 | 1 | 30 | 30 | 0.961538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
41d5370151f1f98230e4ead47c2fe7b28a8e03db | 1,026 | py | Python | Python/cubesat2017/soft/desktop/app/test/virtual/receiver_test_virtual.py | Misha91908/Portfolio | c10b06462ec45f039778c77aa6c84e871cac34f6 | [
"MIT"
] | null | null | null | Python/cubesat2017/soft/desktop/app/test/virtual/receiver_test_virtual.py | Misha91908/Portfolio | c10b06462ec45f039778c77aa6c84e871cac34f6 | [
"MIT"
] | null | null | null | Python/cubesat2017/soft/desktop/app/test/virtual/receiver_test_virtual.py | Misha91908/Portfolio | c10b06462ec45f039778c77aa6c84e871cac34f6 | [
"MIT"
] | null | null | null | import sys
import pytest
import os
def test_is_valid_number_of_bytes():
counter = 0
valid_counter = 0
packet = receiver.receive_packet()
for i in range(len(packet)):
if len(packet[i]) == 65:
counter += 1
valid_counter += 1
else:
counter += 1
assert counter == valid_counter
def test_is_valid_startstop_test():
counter = 0
valid_counter = 0
packet = receiver.receive_packet()
for i in range(len(packet)):
if packet[i][0] == b'\xf1' and packet[i][len(packet[i]) - 1] == b'\xfa':
counter += 1
valid_counter += 1
else:
counter += 1
assert counter == valid_counter
def test_is_valid_command_test():
counter = 0
valid_counter = 0
packet = receiver.receive_packet()
for i in range(len(packet)):
if packet[i][1] == b'\xa0':
counter += 1
valid_counter += 1
else:
counter += 1
assert counter == valid_counter
| 21.375 | 80 | 0.563353 | 132 | 1,026 | 4.189394 | 0.25 | 0.195298 | 0.048825 | 0.075949 | 0.78481 | 0.78481 | 0.78481 | 0.78481 | 0.78481 | 0.78481 | 0 | 0.031977 | 0.329435 | 1,026 | 47 | 81 | 21.829787 | 0.771802 | 0 | 0 | 0.75 | 0 | 0 | 0.011696 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
513f55d9d537e95c3e00b229018e899957a377aa | 86 | py | Python | kanweek/api/__init__.py | chmey/py-week-kanban | 0949d30b17ccb12e4ad2e3121ccd779293d35a07 | [
"MIT"
] | 1 | 2020-10-30T10:02:49.000Z | 2020-10-30T10:02:49.000Z | kanweek/api/__init__.py | chmey/py-week-kanban | 0949d30b17ccb12e4ad2e3121ccd779293d35a07 | [
"MIT"
] | 8 | 2020-10-30T16:52:45.000Z | 2020-12-13T20:27:52.000Z | kanweek/api/__init__.py | chmey/py-week-kanban | 0949d30b17ccb12e4ad2e3121ccd779293d35a07 | [
"MIT"
] | null | null | null | from .common import bpAPI # noqa
from .task import * # noqa
from .user import * # noqa | 28.666667 | 32 | 0.709302 | 13 | 86 | 4.692308 | 0.538462 | 0.262295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.197674 | 86 | 3 | 33 | 28.666667 | 0.884058 | 0.162791 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
5152cdc124a1398d58d065928d4baecb2e9a336f | 104 | py | Python | auth/admin.py | Me-Diga/mediga | 17f9c6f191c5582dbe706db5cea5bd8fc8dc29dc | [
"MIT"
] | null | null | null | auth/admin.py | Me-Diga/mediga | 17f9c6f191c5582dbe706db5cea5bd8fc8dc29dc | [
"MIT"
] | null | null | null | auth/admin.py | Me-Diga/mediga | 17f9c6f191c5582dbe706db5cea5bd8fc8dc29dc | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.models import User
admin.site.register(User)
| 20.8 | 43 | 0.826923 | 16 | 104 | 5.375 | 0.625 | 0.232558 | 0.395349 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.096154 | 104 | 4 | 44 | 26 | 0.914894 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
5ab22b7bc25c25ea0140cb8cdad4684da162e119 | 20,379 | py | Python | tests/integration/test_scale_means.py | Crunch-io/crunch-cube | 80986d5b2106c774f05176fb6c6a5ea0d840f09d | [
"MIT"
] | 3 | 2021-01-22T20:42:31.000Z | 2021-06-02T17:53:19.000Z | tests/integration/test_scale_means.py | Crunch-io/crunch-cube | 80986d5b2106c774f05176fb6c6a5ea0d840f09d | [
"MIT"
] | 331 | 2017-11-13T22:41:56.000Z | 2021-12-02T21:59:43.000Z | tests/integration/test_scale_means.py | Crunch-io/crunch-cube | 80986d5b2106c774f05176fb6c6a5ea0d840f09d | [
"MIT"
] | 1 | 2021-02-19T02:49:00.000Z | 2021-02-19T02:49:00.000Z | # encoding: utf-8
"""Integration tests for scale-mean measures and marginals."""
import numpy as np
import pytest
from cr.cube.cube import Cube
from ..fixtures import CR, SM
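# For orientation (illustrative numbers, not a fixture): a scale mean is the
# count-weighted average of the numeric values assigned to categories, e.g.
# values [1, 2, 3] with counts [2, 3, 5] give (1*2 + 2*3 + 3*5) / 10 = 2.3.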
def test_ca_cat_x_items():
slice_ = Cube(SM.CA_CAT_X_ITEMS).partitions[0]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [1.50454821, 3.11233766, 3.35788192, 3.33271833]
)
assert slice_.rows_scale_mean is None
assert slice_.rows_scale_mean_margin is None
def test_ca_items_x_cat():
slice_ = Cube(SM.CA_ITEMS_X_CAT).partitions[0]
assert slice_.columns_scale_mean is None
np.testing.assert_almost_equal(
slice_.rows_scale_mean, [1.50454821, 3.11233766, 3.35788192, 3.33271833]
)
assert slice_.columns_scale_mean_margin is None
def test_ca_items_x_cat_var_scale_means():
# These 2 fixtures represent 1 dataset and its transpose version
slice_ = Cube(SM.CA_ITEMS_X_CAT).partitions[0]
slice2_ = Cube(SM.CA_CAT_X_ITEMS).partitions[0]
    # Check that the row/column scale means agree across the two (transposed) datasets
assert slice_.rows_scale_mean_stddev == pytest.approx(
slice2_.columns_scale_mean_stddev
)
assert slice2_._columns_scale_mean_variance == pytest.approx(
[2.56410909, 5.17893869, 4.75445248, 4.81611278],
)
assert slice2_.rows_scale_mean_stddev is None
assert slice_.columns_scale_mean_stddev is None
def test_ca_x_mr():
slice_ = Cube(SM.CA_X_MR).partitions[0]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [1.29787234, 1.8, 1.48730964, np.nan]
)
assert slice_.rows_scale_mean is None
assert slice_.rows_scale_mean_margin is None
assert slice_.columns_scale_mean_margin == 1.504548211036992
slice_ = Cube(SM.CA_X_MR).partitions[1]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [3.31746032, 3.10743802, 3.09976976, np.nan]
)
assert slice_.rows_scale_mean is None
slice_ = Cube(SM.CA_X_MR).partitions[2]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [3.31205674, 3.23913043, 3.37745455, np.nan]
)
assert slice_.rows_scale_mean is None
slice_ = Cube(SM.CA_X_MR).partitions[3]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [3.53676471, 3.34814815, 3.3147877, np.nan]
)
assert slice_.rows_scale_mean is None
def test_cat_x_ca_cat_x_items():
slice_ = Cube(SM.CAT_X_CA_CAT_X_ITEMS).partitions[0]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [1.34545455, 2.46938776, 2.7037037, 2.65454545]
)
assert slice_.rows_scale_mean is None
slice_ = Cube(SM.CAT_X_CA_CAT_X_ITEMS).partitions[1]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [1.41935484, 3.25663717, 3.48, 3.58536585]
)
assert slice_.rows_scale_mean is None
slice_ = Cube(SM.CAT_X_CA_CAT_X_ITEMS).partitions[2]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [1.49429038, 3.44905009, 3.59344262, 3.53630363]
)
assert slice_.rows_scale_mean is None
slice_ = Cube(SM.CAT_X_CA_CAT_X_ITEMS).partitions[3]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [1.43365696, 3.02816901, 3.37987013, 3.32107023]
)
assert slice_.rows_scale_mean is None
slice_ = Cube(SM.CAT_X_CA_CAT_X_ITEMS).partitions[4]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [1.22670025, 2.49473684, 2.79848866, 2.78987342]
)
assert slice_.rows_scale_mean is None
slice_ = Cube(SM.CAT_X_CA_CAT_X_ITEMS).partitions[5]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [2.53061224, 3.68421053, 3.9862069, 4.03472222]
)
assert slice_.rows_scale_mean is None
def test_cat_x_cat():
slice_ = Cube(SM.CAT_X_CAT).partitions[0]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [2.6009281, 2.3522267, 2.3197279, 3.3949192]
)
np.testing.assert_almost_equal(
slice_.rows_scale_mean,
[1.43636364, 2.45238095, 2.4730832, 2.68387097, 2.8375, 2.15540541],
)
# Test ScaleMeans marginal
assert slice_.rows_scale_mean_margin == 2.536319612590799
assert slice_.columns_scale_mean_margin == 2.6846246973365617
def test_cat_hs_x_cat_hs_var_scale_means():
slice_ = Cube(CR.ECON_BLAME_X_IDEOLOGY_ROW_AND_COL_HS).partitions[0]
assert slice_.rows_scale_mean_stddev is not None
assert slice_.columns_scale_mean_stddev is not None
assert slice_.rows_scale_mean_stddev == pytest.approx(
[0.943031, 0.9677583, 1.1680149, 0.9817768, 1.8856181, 1.5987533]
)
assert slice_.columns_scale_mean_stddev == pytest.approx(
[0.7195463, 0.7196963, 0.9977753, 0.9169069, 1.0608933, 1.0948414, 1.5740076]
)
assert slice_._columns_scale_mean_variance == pytest.approx(
[0.51774691, 0.51796281, 0.99555556, 0.84071826, 1.12549449, 1.19867769, 2.4775]
)
def test_cat_x_mr():
slice_ = Cube(SM.CAT_X_MR).partitions[0]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [2.45070423, 2.54471545, 2.54263006, np.nan]
)
assert slice_.rows_scale_mean is None
assert slice_.rows_scale_mean_margin is None
assert slice_.columns_scale_mean_margin == 2.5323565323565322
def test_cat_x_cat_with_hs():
# Test without H&S
transforms = {
"columns_dimension": {"insertions": {}},
"rows_dimension": {"insertions": {}},
}
slice_ = Cube(CR.ECON_BLAME_X_IDEOLOGY_ROW_HS, transforms=transforms).partitions[0]
np.testing.assert_almost_equal(
slice_.columns_scale_mean,
[2.19444444, 2.19230769, 2.26666667, 1.88990826, 1.76363636, 3.85],
)
np.testing.assert_almost_equal(
slice_.rows_scale_mean,
[3.87368421, 2.51767677, 3.38429752, 3.66666667, 4.13235294],
)
# Test with H&S
slice_ = Cube(CR.ECON_BLAME_X_IDEOLOGY_ROW_HS).partitions[0]
np.testing.assert_almost_equal(
slice_.columns_scale_mean,
[2.19444444, 2.19230769, 2.26666667, 1.88990826, 1.76363636, 3.85],
)
np.testing.assert_almost_equal(
slice_.rows_scale_mean,
[3.87368421, 2.51767677, 3.0851689, 3.38429752, 3.66666667, 4.13235294],
)
def test_ca_x_mr_with_hs_and_pruning():
transforms = {
"columns_dimension": {"insertions": {}},
"rows_dimension": {"insertions": {}},
}
slice_ = Cube(CR.CA_X_MR_HS, transforms=transforms).partitions[0]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [2.50818336, 2.56844883, 2.90251939, np.nan]
)
assert slice_.rows_scale_mean is None
slice_ = Cube(CR.CA_X_MR_HS, transforms=transforms).partitions[1]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [2.78385708, 2.69292009, 3.11594714, np.nan]
)
assert slice_.rows_scale_mean is None
slice_ = Cube(CR.CA_X_MR_HS, transforms=transforms).partitions[2]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [np.nan, np.nan, np.nan, np.nan]
)
assert slice_.rows_scale_mean is None
transforms = {
"rows_dimension": {"prune": True},
"columns_dimension": {"prune": True},
}
slice_ = Cube(CR.CA_X_MR_HS, transforms=transforms).partitions[0]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [2.50818336, 2.56844883, 2.90251939]
)
assert slice_.rows_scale_mean is None
slice_ = Cube(CR.CA_X_MR_HS, transforms=transforms).partitions[1]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [2.78385708, 2.69292009, 3.11594714]
)
assert slice_.rows_scale_mean is None
slice_ = Cube(CR.CA_X_MR_HS, transforms=transforms).partitions[2]
np.testing.assert_almost_equal(slice_.columns_scale_mean, [])
assert slice_.rows_scale_mean is None
def test_cat_x_cat_pruning_and_hs():
transforms = {
"columns_dimension": {"insertions": {}},
"rows_dimension": {"insertions": {}},
}
slice_ = Cube(CR.CAT_HS_MT_X_CAT_HS_MT, transforms=transforms).partitions[0]
np.testing.assert_almost_equal(
slice_.columns_scale_mean,
[1.57933884, 2.10618401, 2.30460074, np.nan, 2.34680135],
)
np.testing.assert_almost_equal(
slice_.rows_scale_mean,
[1.74213625, 1.97, 2.45356177, 2.11838791, np.nan, 2.0],
)
# Just H&S
slice_ = Cube(CR.CAT_HS_MT_X_CAT_HS_MT).partitions[0]
np.testing.assert_almost_equal(
slice_.columns_scale_mean,
[1.57933884, 1.8308135, 2.10618401, 2.30460074, np.nan, 2.34680135],
    )
np.testing.assert_almost_equal(
slice_.rows_scale_mean,
[1.74213625, 2.2364515, 1.97, 2.45356177, 2.11838791, np.nan, 2.0],
)
# Just pruning
transforms = {
"rows_dimension": {"prune": True},
"columns_dimension": {"prune": True},
}
slice_ = Cube(CR.CAT_HS_MT_X_CAT_HS_MT, transforms=transforms).partitions[0]
np.testing.assert_almost_equal(
slice_.columns_scale_mean,
[1.57933884, 1.83081353, 2.10618401, 2.30460074, 2.34680135],
)
np.testing.assert_almost_equal(
slice_.rows_scale_mean,
[1.74213625, 2.2364515, 1.97, 2.45356177, 2.11838791, 2.0],
)
# Pruning and H&S
transforms = {
"rows_dimension": {"insertions": {}, "prune": True},
"columns_dimension": {"insertions": {}, "prune": True},
}
slice_ = Cube(CR.CAT_HS_MT_X_CAT_HS_MT, transforms=transforms).partitions[0]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [1.57933884, 2.106184, 2.3046007, 2.34680135]
    )
np.testing.assert_almost_equal(
slice_.rows_scale_mean, [1.74213625, 1.97, 2.45356177, 2.11838791, 2.0]
)
def test_cat_x_cat_scale_means_margin():
slice_ = Cube(SM.CAT_X_CAT_SM_MARGIN).partitions[0]
assert slice_.columns_scale_mean_margin == 2.6846246973365617
assert slice_.rows_scale_mean_margin == 2.536319612590799
def test_cat_x_ca_subvar_scale_means():
slice_ = Cube(CR.FRUIT_X_PETS_ARRAY_SUBVARS_FIRST).partitions[0]
assert slice_._columns_scale_mean_variance == pytest.approx(
[0.2054321, 0.24, 0.22558594]
)
assert slice_.columns_scale_mean_stddev == pytest.approx(
[0.4532462, 0.4898979, 0.4749589]
)
assert slice_.rows_scale_mean_stddev is None
slice_ = Cube(CR.FRUIT_X_PETS_ARRAY_SUBVARS_FIRST).partitions[1]
assert slice_._columns_scale_mean_variance == pytest.approx(
[0.2283737, 0.21, 0.21606648]
)
assert slice_.columns_scale_mean_stddev == pytest.approx(
[0.4778846, 0.4582576, 0.4648295]
)
assert slice_.rows_scale_mean is None
def test_cat_x_cat_pruning_and_hs_var_scale_means():
transforms = {
"columns_dimension": {"insertions": {}},
"rows_dimension": {"insertions": {}},
}
slice_ = Cube(CR.CAT_HS_MT_X_CAT_HS_MT, transforms=transforms).partitions[0]
assert slice_._columns_scale_mean_variance == pytest.approx(
[1.4459092, 2.14619102, 2.40430987, np.nan, 0.87972883], nan_ok=True
)
assert slice_.columns_scale_mean_stddev == pytest.approx(
[1.2024596, 1.4649884, 1.5505837, np.nan, 0.9379386], nan_ok=True
)
assert slice_.rows_scale_mean_stddev == pytest.approx(
[0.8506362, 0.9995499, 1.3697947, 0.6971257, np.nan, 0.8164966], nan_ok=True
)
# Just H&S
slice_ = Cube(CR.CAT_HS_MT_X_CAT_HS_MT).partitions[0]
assert slice_._columns_scale_mean_variance == pytest.approx(
[1.4459092, 1.8494177, 2.14619102, 2.40430987, np.nan, 0.87972883], nan_ok=True
)
assert slice_.columns_scale_mean_stddev == pytest.approx(
[1.2024596, 1.359933, 1.4649884, 1.5505837, np.nan, 0.9379386], nan_ok=True
)
assert slice_.rows_scale_mean_stddev == pytest.approx(
[0.8506362, 1.0412664, 0.9995499, 1.3697947, 0.6971257, np.nan, 0.8164966],
nan_ok=True,
)
# Just pruning
transforms = {
"rows_dimension": {"prune": True},
"columns_dimension": {"prune": True},
}
slice_ = Cube(CR.CAT_HS_MT_X_CAT_HS_MT, transforms=transforms).partitions[0]
assert slice_._columns_scale_mean_variance == pytest.approx(
[1.4459092, 1.8494177, 2.14619102, 2.40430987, 0.87972883]
)
assert slice_.columns_scale_mean_stddev == pytest.approx(
[1.2024596, 1.359933, 1.4649884, 1.5505837, 0.9379386]
)
assert slice_.rows_scale_mean_stddev == pytest.approx(
[0.8506362, 1.0412664, 0.9995499, 1.3697947, 0.6971257, 0.8164966]
)
# Pruning and H&S
transforms = {
"rows_dimension": {"insertions": {}, "prune": True},
"columns_dimension": {"insertions": {}, "prune": True},
}
slice_ = Cube(CR.CAT_HS_MT_X_CAT_HS_MT, transforms=transforms).partitions[0]
assert slice_._columns_scale_mean_variance == pytest.approx(
[1.4459092, 2.14619102, 2.40430987, 0.87972883]
)
assert slice_.columns_scale_mean_stddev == pytest.approx(
[1.2024596, 1.4649884, 1.5505837, 0.9379386]
)
assert slice_.rows_scale_mean_stddev == pytest.approx(
[0.8506362, 0.9995499, 1.3697947, 0.6971257, 0.8164966]
)
def test_cat_nps_numval_x_cat_var_scale_means():
slice_ = Cube(SM.CAT_NPS_NUMVAL_X_CAT).partitions[0]
assert slice_._columns_scale_mean_variance == pytest.approx(
[1905.11600238, 2111.67820069, 1655.65636907, 981.86821176],
)
assert slice_.columns_scale_mean_stddev == pytest.approx(
[43.6476346, 45.9529999, 40.6897575, 31.3347764],
)
assert slice_.rows_scale_mean_stddev is None
def test_cat_single_element_x_cat():
slice_ = Cube(SM.CAT_SINGLE_ELEMENT_X_CAT).partitions[0]
np.testing.assert_equal(slice_.columns_scale_mean, [np.nan, np.nan, np.nan, np.nan])
np.testing.assert_equal(slice_.rows_scale_mean, [np.nan])
def test_means_univariate_cat():
strand = Cube(CR.ECON_BLAME_WITH_HS).partitions[0]
np.testing.assert_almost_equal(strand.scale_mean, [2.1735205616850553])
def test_means_bivariate_cat():
slice_ = Cube(CR.ECON_BLAME_X_IDEOLOGY_ROW_HS).partitions[0]
np.testing.assert_almost_equal(
slice_.columns_scale_mean,
[2.19444444, 2.19230769, 2.26666667, 1.88990826, 1.76363636, 3.85],
)
def test_means_cat_x_mr():
slice_ = Cube(CR.FRUIT_X_PETS).partitions[0]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [1.7, 1.6470588, 1.6842105]
)
assert slice_.rows_scale_mean is None
def test_means_mr_x_cat():
slice_ = Cube(CR.PETS_X_FRUIT).partitions[0]
assert slice_.columns_scale_mean is None
np.testing.assert_almost_equal(slice_.rows_scale_mean, [1.7, 1.6470588, 1.6842105])
def test_means_cat_array_cat_dim_first():
slice_ = Cube(CR.PETS_ARRAY_CAT_FIRST).partitions[0]
assert slice_.columns_scale_mean is None
np.testing.assert_almost_equal(
slice_.rows_scale_mean, [1.44333002, 1.48049069, 1.57881177]
)
def test_means_cat_array_subvar_dim_first():
slice_ = Cube(CR.PETS_ARRAY_SUBVAR_FIRST).partitions[0]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [1.44333002, 1.48049069, 1.57881177]
)
assert slice_.rows_scale_mean is None
def test_means_cat_x_cat_arr_fruit_first():
slice_ = Cube(CR.FRUIT_X_PETS_ARRAY).partitions[0]
assert slice_.columns_scale_mean is None
np.testing.assert_almost_equal(slice_.rows_scale_mean, [1.48, 1.4285714, 1.5217391])
slice_ = Cube(CR.FRUIT_X_PETS_ARRAY).partitions[1]
assert slice_.columns_scale_mean is None
np.testing.assert_almost_equal(
slice_.rows_scale_mean, [1.40740741, 1.53846154, 1.55319149]
)
def test_means_cat_x_cat_arr_subvars_first():
slice_ = Cube(CR.FRUIT_X_PETS_ARRAY_SUBVARS_FIRST).partitions[0]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [1.71111111, 1.6, 1.65625]
)
assert slice_.rows_scale_mean is None
slice_ = Cube(CR.FRUIT_X_PETS_ARRAY_SUBVARS_FIRST).partitions[1]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [1.64705882, 1.7, 1.68421053]
)
assert slice_.rows_scale_mean is None
def test_means_cat_x_cat_arr_pets_first():
slice_ = Cube(CR.FRUIT_X_PETS_ARRAY_PETS_FIRST).partitions[0]
np.testing.assert_almost_equal(slice_.columns_scale_mean, [1.48, 1.40740741])
np.testing.assert_almost_equal(slice_.rows_scale_mean, [1.71111111, 1.64705882])
slice_ = Cube(CR.FRUIT_X_PETS_ARRAY_PETS_FIRST).partitions[1]
np.testing.assert_almost_equal(slice_.columns_scale_mean, [1.42857143, 1.53846154])
np.testing.assert_almost_equal(slice_.rows_scale_mean, [1.6, 1.7])
slice_ = Cube(CR.FRUIT_X_PETS_ARRAY_PETS_FIRST).partitions[2]
np.testing.assert_almost_equal(slice_.columns_scale_mean, [1.52173913, 1.55319149])
np.testing.assert_almost_equal(slice_.rows_scale_mean, [1.65625, 1.68421053])
def test_means_with_null_values():
slice_ = Cube(CR.SCALE_WITH_NULL_VALUES).partitions[0]
np.testing.assert_almost_equal(
slice_.columns_scale_mean, [1.2060688, 1.0669344, 1.023199]
)
assert slice_.rows_scale_mean is None
def test_mean_univariate_cat_var_scale_mean():
# Test nonmissing with no null numeric values
strand = Cube(SM.UNIVARIATE_CAT).partitions[0]
assert strand.scale_mean == pytest.approx(2.686585)
# Test nonmissing with null numeric value
strand = Cube(SM.UNIVARIATE_CAT_WITH_NULL_NUMERIC_VALUE).partitions[0]
assert strand.scale_mean == pytest.approx(2.744010)
# Test with all null numeric value
strand = Cube(SM.UNIVARIATE_CAT_WITH_ALL_NULL_NUMERIC_VALUE).partitions[0]
assert strand.scale_mean is None
def test_mr_x_cat():
slice_ = Cube(SM.MR_X_CAT).partitions[0]
assert slice_.columns_scale_mean is None
np.testing.assert_almost_equal(
slice_.rows_scale_mean, [2.45070423, 2.54471545, 2.54263006, np.nan]
)
assert slice_.rows_scale_mean_margin == 2.5323565323565322
assert slice_.columns_scale_mean_margin is None
def test_rows_and_new_rows_scale_mean_stddev_for_fruit_x_pets_array():
slice_ = Cube(CR.FRUIT_X_PETS_ARRAY).partitions[0]
assert slice_._columns_scale_mean_variance is None
assert slice_.rows_scale_mean_stddev == pytest.approx(
[0.4995998, 0.4948717, 0.4995272]
)
slice_ = Cube(CR.FRUIT_X_PETS_ARRAY).partitions[1]
assert slice_.rows_scale_mean_stddev == pytest.approx(
[0.4913518, 0.4985185, 0.4971626]
)
def test_univariate_cat():
strand = Cube(SM.UNIVARIATE_CAT).partitions[0]
np.testing.assert_almost_equal(strand.scale_mean, [2.6865854])
def test_univariate_cat_with_hiding():
strand_ = Cube(SM.BOLSHEVIK_HAIR).partitions[0]
np.testing.assert_almost_equal(strand_.scale_mean, [1.504548211])
    # Applying hiding transforms
transforms = {
"rows_dimension": {"elements": {"5": {"hide": True}, "4": {"hide": True}}}
}
strand_with_hiding_ = Cube(SM.BOLSHEVIK_HAIR, transforms=transforms).partitions[0]
np.testing.assert_almost_equal(strand_.scale_mean, strand_with_hiding_.scale_mean)
def test_univariate_with_hs():
# Test without H&S
transforms = {
"columns_dimension": {"insertions": {}},
"rows_dimension": {"insertions": {}},
}
strand = Cube(CR.ECON_BLAME_WITH_HS, transforms).partitions[0]
np.testing.assert_almost_equal(strand.scale_mean, [2.17352056])
# Test with H&S
strand = Cube(CR.ECON_BLAME_WITH_HS).partitions[0]
np.testing.assert_almost_equal(strand.scale_mean, [2.17352056])
def test_univariate_with_hs_scale_means_row():
# Test without H&S
transforms = {
"columns_dimension": {"insertions": {}},
"rows_dimension": {"insertions": {}},
}
strand = Cube(CR.ECON_BLAME_WITH_HS, transforms).partitions[0]
assert strand.scale_mean == pytest.approx(2.1735206)
# Test with H&S
strand = Cube(CR.ECON_BLAME_WITH_HS).partitions[0]
assert strand.scale_mean == pytest.approx(2.1735206)
def test_univariate_ca_subvar_with_empty_total_counts():
strand = Cube(SM.UNIVARIATE_CA_SUBVAR).partitions[0]
    # --- scale_mean, scale_std_dev and scale_std_err can be None when
# --- _total_weighted_count is 0.
assert strand.scale_mean is None
assert strand.scale_std_dev is None
assert strand.scale_std_err is None
| 36.069027 | 88 | 0.711222 | 2,971 | 20,379 | 4.508583 | 0.106361 | 0.096081 | 0.081224 | 0.103471 | 0.83561 | 0.803136 | 0.785816 | 0.763121 | 0.713624 | 0.66368 | 0 | 0.155571 | 0.17518 | 20,379 | 564 | 89 | 36.132979 | 0.641323 | 0.031896 | 0 | 0.449541 | 0 | 0 | 0.029592 | 0 | 0 | 0 | 0 | 0 | 0.321101 | 1 | 0.077982 | false | 0 | 0.009174 | 0 | 0.087156 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
cf8c9ec9c3787aa89e4181b2c953998606b408b2 | 22 | py | Python | src/django-aurora/aurora/apps/accounts/models.py | arantesdv/python-django-project | 01adfd62a0fd47641f151d1bc7e5db2c2ea6d00a | [
"MIT"
] | 1 | 2020-04-22T22:34:26.000Z | 2020-04-22T22:34:26.000Z | src/django-aurora/aurora/apps/accounts/models.py | arantesdv/python-django-project | 01adfd62a0fd47641f151d1bc7e5db2c2ea6d00a | [
"MIT"
] | 9 | 2021-03-19T02:17:08.000Z | 2022-03-12T00:25:34.000Z | src/django-aurora/aurora/apps/accounts/models.py | arantesdv/python-django-project | 01adfd62a0fd47641f151d1bc7e5db2c2ea6d00a | [
"MIT"
] | null | null | null | from .models3 import * | 22 | 22 | 0.772727 | 3 | 22 | 5.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 0.136364 | 22 | 1 | 22 | 22 | 0.842105 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
cfad1f029a82307d0efa3b79749d29e118d89f66 | 476 | py | Python | tests/data/sync/typing_py3.py | ntninja/unasync | 36991657efb04aa6c39dbaa89f1d87330c3f24b4 | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/data/sync/typing_py3.py | ntninja/unasync | 36991657efb04aa6c39dbaa89f1d87330c3f24b4 | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/data/sync/typing_py3.py | ntninja/unasync | 36991657efb04aa6c39dbaa89f1d87330c3f24b4 | [
"Apache-2.0",
"MIT"
] | null | null | null | # fmt: off
# A forward-reference typed function that returns an iterator for an (a)sync iterable
def aiter1(a: "typing.Iterable[int]") -> 'typing.Iterable[int]':
return a.__iter__()
# Same as the above but using triple-quoted strings
def aiter2(a: """typing.Iterable[int]""") -> r'''typing.Iterable[int]''':
return a.__iter__()
# Same as the above but without forward-references
def aiter3(a: typing.Iterable[int]) -> typing.Iterable[int]:
return a.__iter__()
# fmt: on
| 34 | 85 | 0.716387 | 72 | 476 | 4.569444 | 0.486111 | 0.255319 | 0.31003 | 0.164134 | 0.468085 | 0.468085 | 0.468085 | 0.468085 | 0.468085 | 0.468085 | 0 | 0.007246 | 0.130252 | 476 | 13 | 86 | 36.615385 | 0.78744 | 0.420168 | 0 | 0.5 | 0 | 0 | 0.296296 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
321109813a49c164bf969e852132106c55ed9a4e | 224 | py | Python | openaerostruct/docs/aero_walkthrough/part_7.py | lamkina/OpenAeroStruct | d30e2626fc1272e7fe3a27386c4c663157e958ec | [
"Apache-2.0"
] | null | null | null | openaerostruct/docs/aero_walkthrough/part_7.py | lamkina/OpenAeroStruct | d30e2626fc1272e7fe3a27386c4c663157e958ec | [
"Apache-2.0"
] | null | null | null | openaerostruct/docs/aero_walkthrough/part_7.py | lamkina/OpenAeroStruct | d30e2626fc1272e7fe3a27386c4c663157e958ec | [
"Apache-2.0"
] | null | null | null | assert_near_equal(prob["aero_point_0.wing_perf.CD"][0], 0.033389699871650073, 1e-6)
assert_near_equal(prob["aero_point_0.wing_perf.CL"][0], 0.5, 1e-6)
assert_near_equal(prob["aero_point_0.CM"][1], -1.7885550372372376, 1e-6)
| 56 | 83 | 0.772321 | 43 | 224 | 3.697674 | 0.418605 | 0.188679 | 0.283019 | 0.358491 | 0.685535 | 0.685535 | 0.685535 | 0.685535 | 0.685535 | 0 | 0 | 0.232558 | 0.040179 | 224 | 3 | 84 | 74.666667 | 0.506977 | 0 | 0 | 0 | 0 | 0 | 0.290179 | 0.223214 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5c6a2164514d0f22059c997e517009f026cd41be | 341 | py | Python | CoV19/bots/views.py | just-ary27/CovBot-revamp | 31af847237c4c5e7d5086a78950d06ecfd81318f | [
"MIT"
] | 1 | 2021-05-12T18:44:30.000Z | 2021-05-12T18:44:30.000Z | CoV19/bots/views.py | just-ary27/CovBot-revamp | 31af847237c4c5e7d5086a78950d06ecfd81318f | [
"MIT"
] | 2 | 2021-09-22T18:41:37.000Z | 2022-02-10T09:28:52.000Z | CoV19/bots/views.py | just-ary27/CovBot-revamp | 31af847237c4c5e7d5086a78950d06ecfd81318f | [
"MIT"
] | null | null | null | from django.shortcuts import render
# Create your views here.
def bots(request):
return render(request,'bots/bots.html')
def features(request):
return render(request,"bots/features.html")
def commands(request):
return render(request,"bots/commands.html")
def tutorial(request):
return render(request,"bots/tutorial.html") | 24.357143 | 47 | 0.744868 | 45 | 341 | 5.644444 | 0.377778 | 0.204724 | 0.299213 | 0.409449 | 0.472441 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.129032 | 341 | 14 | 48 | 24.357143 | 0.855219 | 0.067449 | 0 | 0 | 0 | 0 | 0.214511 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.444444 | false | 0 | 0.111111 | 0.444444 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
5c7d5b4f6b5506c9fb7ff91a472e5609be355990 | 157 | py | Python | net/__init__.py | Future-of-Frontier/mhf-fake-client | 232f9d38c410c4d050d4ae7f8070a9e77d6db9a3 | [
"MIT"
] | null | null | null | net/__init__.py | Future-of-Frontier/mhf-fake-client | 232f9d38c410c4d050d4ae7f8070a9e77d6db9a3 | [
"MIT"
] | null | null | null | net/__init__.py | Future-of-Frontier/mhf-fake-client | 232f9d38c410c4d050d4ae7f8070a9e77d6db9a3 | [
"MIT"
] | 3 | 2019-12-14T07:03:50.000Z | 2020-10-08T17:58:52.000Z | from .crypto import *
from .packet import *
from .socket_file_wrapper import *
from .constructs import *
from .util import *
from .packet_ids import PacketID | 26.166667 | 34 | 0.783439 | 22 | 157 | 5.454545 | 0.5 | 0.416667 | 0.266667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.146497 | 157 | 6 | 35 | 26.166667 | 0.895522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
5c89610d1de6307978d8f1978d9b4ddecdf06eb3 | 17,623 | py | Python | tovp/promotions/urls.py | nrsimha/tovp | 311bc957c95c294811d737f5df30b0a218d35610 | [
"MIT"
] | null | null | null | tovp/promotions/urls.py | nrsimha/tovp | 311bc957c95c294811d737f5df30b0a218d35610 | [
"MIT"
] | null | null | null | tovp/promotions/urls.py | nrsimha/tovp | 311bc957c95c294811d737f5df30b0a218d35610 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.conf.urls import include, url
from . import views
from .models import (NrsimhaTile, GoldenBrick, GuruParamparaBrick,
RadhaMadhavaBrick, SilverCoin, GadadharCoin, AdvaitaCoin,
GoldCoin, PlatinumCoin, RadharaniCoin,
SquareFeet, SquareMeter, Trustee, GeneralDonation)
from .forms import (NrsimhaTileForm, GoldenBrickForm, GuruParamparaBrickForm,
RadhaMadhavaBrickForm, SilverCoinForm, GadadharCoinForm,
AdvaitaCoinForm, GoldCoinForm, PlatinumCoinForm,
RadharaniCoinForm, SquareFeetForm, SquareMeterForm,
TrusteeForm, GeneralDonationForm)
urlpatterns = [
url(r'^nrsimha-tile/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.BrickCreateView.as_view(
model=NrsimhaTile,
form_class=NrsimhaTileForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.BrickDetailView.as_view(
model=NrsimhaTile,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.BrickUpdateView.as_view(
model=NrsimhaTile,
form_class=NrsimhaTileForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=NrsimhaTile,
),
name='delete'
),
], namespace="nrsimha-tile")),
url(r'^golden-brick/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.BrickCreateView.as_view(
model=GoldenBrick,
form_class=GoldenBrickForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.BrickDetailView.as_view(
model=GoldenBrick,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.BrickUpdateView.as_view(
model=GoldenBrick,
form_class=GoldenBrickForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=GoldenBrick,
),
name='delete'
),
], namespace="golden-brick")),
url(r'^guru-parampara-brick/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.BrickCreateView.as_view(
model=GuruParamparaBrick,
form_class=GuruParamparaBrickForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.BrickDetailView.as_view(
model=GuruParamparaBrick,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.BrickUpdateView.as_view(
model=GuruParamparaBrick,
form_class=GuruParamparaBrickForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=GuruParamparaBrick,
),
name='delete'
),
], namespace="guru-parampara-brick")),
url(r'^radha-madhava-brick/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.BrickCreateView.as_view(
model=RadhaMadhavaBrick,
form_class=RadhaMadhavaBrickForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.BrickDetailView.as_view(
model=RadhaMadhavaBrick,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.BrickUpdateView.as_view(
model=RadhaMadhavaBrick,
form_class=RadhaMadhavaBrickForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=RadhaMadhavaBrick,
),
name='delete'
),
], namespace="radha-madhava-brick")),
url(r'^srivas-coin/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.CoinCreateView.as_view(
model=SilverCoin,
form_class=SilverCoinForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.CoinDetailView.as_view(
model=SilverCoin,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.CoinUpdateView.as_view(
model=SilverCoin,
form_class=SilverCoinForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=SilverCoin,
),
name='delete'
),
], namespace="srivas-coin")),
url(r'^gadadhar-coin/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.CoinCreateView.as_view(
model=GadadharCoin,
form_class=GadadharCoinForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.CoinDetailView.as_view(
model=GadadharCoin,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.CoinUpdateView.as_view(
model=GadadharCoin,
form_class=GadadharCoinForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=GadadharCoin,
),
name='delete'
),
], namespace="gadadhar-coin")),
url(r'^advaita-coin/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.CoinCreateView.as_view(
model=AdvaitaCoin,
form_class=AdvaitaCoinForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.CoinDetailView.as_view(
model=AdvaitaCoin,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.CoinUpdateView.as_view(
model=AdvaitaCoin,
form_class=AdvaitaCoinForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=AdvaitaCoin,
),
name='delete'
),
], namespace="advaita-coin")),
url(r'^gold-coin/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.CoinCreateView.as_view(
model=GoldCoin,
form_class=GoldCoinForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.CoinDetailView.as_view(
model=GoldCoin,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.CoinUpdateView.as_view(
model=GoldCoin,
form_class=GoldCoinForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=GoldCoin,
),
name='delete'
),
], namespace="nityananda-coin")),
url(r'^platinum-coin/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.CoinCreateView.as_view(
model=PlatinumCoin,
form_class=PlatinumCoinForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.CoinDetailView.as_view(
model=PlatinumCoin,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.CoinUpdateView.as_view(
model=PlatinumCoin,
form_class=PlatinumCoinForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=PlatinumCoin,
),
name='delete'
),
], namespace="caitanya-coin")),
url(r'^radharani-coin/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.CoinCreateView.as_view(
model=RadharaniCoin,
form_class=RadharaniCoinForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.CoinDetailView.as_view(
model=RadharaniCoin,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.CoinUpdateView.as_view(
model=RadharaniCoin,
form_class=RadharaniCoinForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=RadharaniCoin,
),
name='delete'
),
], namespace="radharani-coin")),
url(r'^square-feet/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.FeetCreateView.as_view(
model=SquareFeet,
form_class=SquareFeetForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.FeetDetailView.as_view(
model=SquareFeet,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.FeetUpdateView.as_view(
model=SquareFeet,
form_class=SquareFeetForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=SquareFeet,
),
name='delete'
),
], namespace="square-feet")),
url(r'^square-meter/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.FeetCreateView.as_view(
model=SquareMeter,
form_class=SquareMeterForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.FeetDetailView.as_view(
model=SquareMeter,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.FeetUpdateView.as_view(
model=SquareMeter,
form_class=SquareMeterForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=SquareMeter,
),
name='delete'
),
], namespace="square-meter")),
url(r'^trustee/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.TrusteeCreateView.as_view(
model=Trustee,
form_class=TrusteeForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.TrusteeDetailView.as_view(
model=Trustee,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.TrusteeUpdateView.as_view(
model=Trustee,
form_class=TrusteeForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=Trustee,
),
name='delete'
),
], namespace="trustee")),
url(r'^general-donation/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.GeneralDonationCreateView.as_view(
model=GeneralDonation,
form_class=GeneralDonationForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.GeneralDonationDetailView.as_view(
model=GeneralDonation,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.GeneralDonationUpdateView.as_view(
model=GeneralDonation,
form_class=GeneralDonationForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=GeneralDonation,
),
name='delete'
),
], namespace="general-donation")),
]
| 36.714583 | 85 | 0.413948 | 1,534 | 17,623 | 4.627771 | 0.064537 | 0.047331 | 0.055219 | 0.078884 | 0.833779 | 0.776025 | 0.776025 | 0.776025 | 0.625018 | 0.625018 | 0 | 0.0001 | 0.429893 | 17,623 | 479 | 86 | 36.791232 | 0.70648 | 0.001192 | 0 | 0.871579 | 0 | 0.058947 | 0.214205 | 0.175057 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.008421 | 0 | 0.008421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
7a2d010c26431e718efe0c6efd7834acbd1e4426 | 42 | py | Python | Common/__init__.py | PesyCorm/TestTask | 2ae4c96ffe92a9d77dba8af537a1941d723edf25 | [
"MIT"
] | null | null | null | Common/__init__.py | PesyCorm/TestTask | 2ae4c96ffe92a9d77dba8af537a1941d723edf25 | [
"MIT"
] | null | null | null | Common/__init__.py | PesyCorm/TestTask | 2ae4c96ffe92a9d77dba8af537a1941d723edf25 | [
"MIT"
] | null | null | null | from .browser_control import switch_window | 42 | 42 | 0.904762 | 6 | 42 | 6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.071429 | 42 | 1 | 42 | 42 | 0.923077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7a4153d658d3b1adb2677e7f846d4da2de2a70f0 | 117 | py | Python | wepppy/export/__init__.py | hwbeeson/wepppy | 6358552df99853c75be8911e7ef943108ae6923e | [
"BSD-3-Clause"
] | null | null | null | wepppy/export/__init__.py | hwbeeson/wepppy | 6358552df99853c75be8911e7ef943108ae6923e | [
"BSD-3-Clause"
] | null | null | null | wepppy/export/__init__.py | hwbeeson/wepppy | 6358552df99853c75be8911e7ef943108ae6923e | [
"BSD-3-Clause"
] | null | null | null | from .export import *
from .arc_export import arc_export, has_arc_export
from .ermit_input import create_ermit_input
| 29.25 | 50 | 0.846154 | 19 | 117 | 4.842105 | 0.421053 | 0.293478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 117 | 3 | 51 | 39 | 0.884615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7a844bb9fc4f68599f346c60547656f5393624ca | 29 | py | Python | editor/components/sidebar/__init__.py | xpenalosa/PyTextEditor | 7c5f7e6c74143c0885477f838b9253660cd80b9b | [
"MIT"
] | 1 | 2020-07-12T20:44:05.000Z | 2020-07-12T20:44:05.000Z | editor/components/sidebar/__init__.py | xpenalosa/PyTextEditor | 7c5f7e6c74143c0885477f838b9253660cd80b9b | [
"MIT"
] | null | null | null | editor/components/sidebar/__init__.py | xpenalosa/PyTextEditor | 7c5f7e6c74143c0885477f838b9253660cd80b9b | [
"MIT"
] | 1 | 2021-08-24T15:32:48.000Z | 2021-08-24T15:32:48.000Z | from .sidebar import Sidebar
| 14.5 | 28 | 0.827586 | 4 | 29 | 6 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.137931 | 29 | 1 | 29 | 29 | 0.96 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7aba50760d293c926ed24ca013f4ff2ddc373cf4 | 285 | py | Python | gym_tacto/envs/__init__.py | ErickRosete/gym_tacto | 892e1c8b94c956fe5abfd70132edd0b86c977902 | [
"MIT"
] | 2 | 2021-07-22T04:06:44.000Z | 2022-02-14T03:39:24.000Z | gym_tacto/envs/__init__.py | ErickRosete/gym_tacto | 892e1c8b94c956fe5abfd70132edd0b86c977902 | [
"MIT"
] | null | null | null | gym_tacto/envs/__init__.py | ErickRosete/gym_tacto | 892e1c8b94c956fe5abfd70132edd0b86c977902 | [
"MIT"
] | null | null | null | from gym_tacto.envs.sawyer_peg_v0 import SawyerPegEnv as SawyerPegV0
from gym_tacto.envs.sawyer_peg_v1 import SawyerPegEnv as SawyerPegV1
from gym_tacto.envs.sawyer_door_v0 import SawyerDoorEnv as SawyerDoorV0
from gym_tacto.envs.sawyer_grasp_v0 import SawyerGraspEnv as SawyerGraspV0
| 57 | 74 | 0.887719 | 44 | 285 | 5.477273 | 0.431818 | 0.116183 | 0.19917 | 0.26556 | 0.390041 | 0.207469 | 0 | 0 | 0 | 0 | 0 | 0.030651 | 0.084211 | 285 | 4 | 75 | 71.25 | 0.89272 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
8f9c7a9d890736f7724c21a2161ecf9f9b21ec2f | 11,358 | py | Python | sdk/python/build/lib/pulumi_databricks/__init__.py | ingenii-solutions/pulumi-databricks | f03ecc4e190a4e59eb635663f6408350dcab42ea | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2021-12-10T07:35:59.000Z | 2022-03-23T22:53:55.000Z | sdk/python/pulumi_databricks/__init__.py | ingenii-solutions/pulumi-databricks | f03ecc4e190a4e59eb635663f6408350dcab42ea | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_databricks/__init__.py | ingenii-solutions/pulumi-databricks | f03ecc4e190a4e59eb635663f6408350dcab42ea | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from . import _utilities
import typing
# Export this package's modules as members:
from .provider import *
# Make subpackages available:
if typing.TYPE_CHECKING:
import pulumi_databricks.config as config
import pulumi_databricks.databricks as databricks
else:
config = _utilities.lazy_import('pulumi_databricks.config')
databricks = _utilities.lazy_import('pulumi_databricks.databricks')
_utilities.register(
resource_modules="""
[
{
"pkg": "databricks",
"mod": "databricks/awsS3Mount",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/awsS3Mount:AwsS3Mount": "AwsS3Mount"
}
},
{
"pkg": "databricks",
"mod": "databricks/azureAdlsGen1Mount",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/azureAdlsGen1Mount:AzureAdlsGen1Mount": "AzureAdlsGen1Mount"
}
},
{
"pkg": "databricks",
"mod": "databricks/azureAdlsGen2Mount",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/azureAdlsGen2Mount:AzureAdlsGen2Mount": "AzureAdlsGen2Mount"
}
},
{
"pkg": "databricks",
"mod": "databricks/azureBlobMount",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/azureBlobMount:AzureBlobMount": "AzureBlobMount"
}
},
{
"pkg": "databricks",
"mod": "databricks/azureDbfsFile",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/azureDbfsFile:AzureDbfsFile": "AzureDbfsFile"
}
},
{
"pkg": "databricks",
"mod": "databricks/catalog",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/catalog:Catalog": "Catalog"
}
},
{
"pkg": "databricks",
"mod": "databricks/cluster",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/cluster:Cluster": "Cluster"
}
},
{
"pkg": "databricks",
"mod": "databricks/clusterPolicy",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/clusterPolicy:ClusterPolicy": "ClusterPolicy"
}
},
{
"pkg": "databricks",
"mod": "databricks/databricksMount",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/databricksMount:DatabricksMount": "DatabricksMount"
}
},
{
"pkg": "databricks",
"mod": "databricks/directory",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/directory:Directory": "Directory"
}
},
{
"pkg": "databricks",
"mod": "databricks/globalInitScript",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/globalInitScript:GlobalInitScript": "GlobalInitScript"
}
},
{
"pkg": "databricks",
"mod": "databricks/grants",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/grants:Grants": "Grants"
}
},
{
"pkg": "databricks",
"mod": "databricks/group",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/group:Group": "Group"
}
},
{
"pkg": "databricks",
"mod": "databricks/groupInstanceProfile",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/groupInstanceProfile:GroupInstanceProfile": "GroupInstanceProfile"
}
},
{
"pkg": "databricks",
"mod": "databricks/groupMember",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/groupMember:GroupMember": "GroupMember"
}
},
{
"pkg": "databricks",
"mod": "databricks/iPAccessList",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/iPAccessList:IPAccessList": "IPAccessList"
}
},
{
"pkg": "databricks",
"mod": "databricks/instancePool",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/instancePool:InstancePool": "InstancePool"
}
},
{
"pkg": "databricks",
"mod": "databricks/instanceProfile",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/instanceProfile:InstanceProfile": "InstanceProfile"
}
},
{
"pkg": "databricks",
"mod": "databricks/job",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/job:Job": "Job"
}
},
{
"pkg": "databricks",
"mod": "databricks/library",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/library:Library": "Library"
}
},
{
"pkg": "databricks",
"mod": "databricks/mLFlowExperiment",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/mLFlowExperiment:MLFlowExperiment": "MLFlowExperiment"
}
},
{
"pkg": "databricks",
"mod": "databricks/mLFlowModel",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/mLFlowModel:MLFlowModel": "MLFlowModel"
}
},
{
"pkg": "databricks",
"mod": "databricks/metastore",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/metastore:Metastore": "Metastore"
}
},
{
"pkg": "databricks",
"mod": "databricks/metastoreAssignment",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/metastoreAssignment:MetastoreAssignment": "MetastoreAssignment"
}
},
{
"pkg": "databricks",
"mod": "databricks/metastoreDataAccess",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/metastoreDataAccess:MetastoreDataAccess": "MetastoreDataAccess"
}
},
{
"pkg": "databricks",
"mod": "databricks/mwsCredentials",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/mwsCredentials:MwsCredentials": "MwsCredentials"
}
},
{
"pkg": "databricks",
"mod": "databricks/mwsCustomerManagedKeys",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/mwsCustomerManagedKeys:MwsCustomerManagedKeys": "MwsCustomerManagedKeys"
}
},
{
"pkg": "databricks",
"mod": "databricks/mwsLogDelivery",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/mwsLogDelivery:MwsLogDelivery": "MwsLogDelivery"
}
},
{
"pkg": "databricks",
"mod": "databricks/mwsNetworks",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/mwsNetworks:MwsNetworks": "MwsNetworks"
}
},
{
"pkg": "databricks",
"mod": "databricks/mwsPrivateAccessSettings",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/mwsPrivateAccessSettings:MwsPrivateAccessSettings": "MwsPrivateAccessSettings"
}
},
{
"pkg": "databricks",
"mod": "databricks/mwsStorageConfigurations",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/mwsStorageConfigurations:MwsStorageConfigurations": "MwsStorageConfigurations"
}
},
{
"pkg": "databricks",
"mod": "databricks/mwsVpcEndpoint",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/mwsVpcEndpoint:MwsVpcEndpoint": "MwsVpcEndpoint"
}
},
{
"pkg": "databricks",
"mod": "databricks/mwsWorkspaces",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/mwsWorkspaces:MwsWorkspaces": "MwsWorkspaces"
}
},
{
"pkg": "databricks",
"mod": "databricks/notebook",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/notebook:Notebook": "Notebook"
}
},
{
"pkg": "databricks",
"mod": "databricks/oboToken",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/oboToken:OboToken": "OboToken"
}
},
{
"pkg": "databricks",
"mod": "databricks/permissions",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/permissions:Permissions": "Permissions"
}
},
{
"pkg": "databricks",
"mod": "databricks/pipeline",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/pipeline:Pipeline": "Pipeline"
}
},
{
"pkg": "databricks",
"mod": "databricks/repo",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/repo:Repo": "Repo"
}
},
{
"pkg": "databricks",
"mod": "databricks/schema",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/schema:Schema": "Schema"
}
},
{
"pkg": "databricks",
"mod": "databricks/secret",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/secret:Secret": "Secret"
}
},
{
"pkg": "databricks",
"mod": "databricks/secretAcl",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/secretAcl:SecretAcl": "SecretAcl"
}
},
{
"pkg": "databricks",
"mod": "databricks/secretScope",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/secretScope:SecretScope": "SecretScope"
}
},
{
"pkg": "databricks",
"mod": "databricks/servicePrincipal",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/servicePrincipal:ServicePrincipal": "ServicePrincipal"
}
},
{
"pkg": "databricks",
"mod": "databricks/sqlDashboard",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/sqlDashboard:SqlDashboard": "SqlDashboard"
}
},
{
"pkg": "databricks",
"mod": "databricks/sqlEndpoint",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/sqlEndpoint:SqlEndpoint": "SqlEndpoint"
}
},
{
"pkg": "databricks",
"mod": "databricks/sqlGlobalConfig",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/sqlGlobalConfig:SqlGlobalConfig": "SqlGlobalConfig"
}
},
{
"pkg": "databricks",
"mod": "databricks/sqlPermissions",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/sqlPermissions:SqlPermissions": "SqlPermissions"
}
},
{
"pkg": "databricks",
"mod": "databricks/sqlQuery",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/sqlQuery:SqlQuery": "SqlQuery"
}
},
{
"pkg": "databricks",
"mod": "databricks/sqlVisualization",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/sqlVisualization:SqlVisualization": "SqlVisualization"
}
},
{
"pkg": "databricks",
"mod": "databricks/sqlWidget",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/sqlWidget:SqlWidget": "SqlWidget"
}
},
{
"pkg": "databricks",
"mod": "databricks/token",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/token:Token": "Token"
}
},
{
"pkg": "databricks",
"mod": "databricks/user",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/user:User": "User"
}
},
{
"pkg": "databricks",
"mod": "databricks/userInstanceProfile",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/userInstanceProfile:UserInstanceProfile": "UserInstanceProfile"
}
},
{
"pkg": "databricks",
"mod": "databricks/workspaceConf",
"fqn": "pulumi_databricks.databricks",
"classes": {
"databricks:databricks/workspaceConf:WorkspaceConf": "WorkspaceConf"
}
}
]
""",
resource_packages="""
[
{
"pkg": "databricks",
"token": "pulumi:providers:databricks",
"fqn": "pulumi_databricks",
"class": "Provider"
}
]
"""
)
| 24.373391 | 104 | 0.675735 | 906 | 11,358 | 8.396247 | 0.122517 | 0.289207 | 0.191403 | 0.184567 | 0.406731 | 0.397529 | 0.397529 | 0 | 0 | 0 | 0 | 0.001338 | 0.14448 | 11,358 | 465 | 105 | 24.425806 | 0.781517 | 0.021747 | 0 | 0.356674 | 1 | 0 | 0.966952 | 0.530212 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.015317 | 0 | 0.015317 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
8fa311fa39d2e6cb9d8857c6c3aa60f2f360674f | 45 | py | Python | ezpykit/__init__.py | fakegit/mo-han-toolbox | 9d5bbc1fe7f12040715d3a0d3f320a1ad617aed8 | [
"MIT"
] | null | null | null | ezpykit/__init__.py | fakegit/mo-han-toolbox | 9d5bbc1fe7f12040715d3a0d3f320a1ad617aed8 | [
"MIT"
] | null | null | null | ezpykit/__init__.py | fakegit/mo-han-toolbox | 9d5bbc1fe7f12040715d3a0d3f320a1ad617aed8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from .common import *
| 15 | 22 | 0.711111 | 7 | 45 | 4.571429 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025641 | 0.133333 | 45 | 2 | 23 | 22.5 | 0.794872 | 0.466667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
8902b77b25df369f0bdffd775750fbfed6aed45c | 41 | py | Python | sys_version.py | nna6/ei_swc_2017 | 7a1924ab1e5b87baeed72a405b1caf64480b08e2 | [
"MIT"
] | null | null | null | sys_version.py | nna6/ei_swc_2017 | 7a1924ab1e5b87baeed72a405b1caf64480b08e2 | [
"MIT"
] | null | null | null | sys_version.py | nna6/ei_swc_2017 | 7a1924ab1e5b87baeed72a405b1caf64480b08e2 | [
"MIT"
] | null | null | null | import sys
print('version is', sys.version)
| 13.666667 | 29 | 0.731707 | 7 | 41 | 4.285714 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121951 | 41 | 2 | 30 | 20.5 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0.243902 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
8f2624ae9a9ed1ff32221d6ed171c31d5cfbc524 | 200 | py | Python | cowin_core/models/__init__.py | iverson2937/cowinaddons | 58205012623207696c19b3f558ebfdb929961c3b | [
"MIT"
] | null | null | null | cowin_core/models/__init__.py | iverson2937/cowinaddons | 58205012623207696c19b3f558ebfdb929961c3b | [
"MIT"
] | null | null | null | cowin_core/models/__init__.py | iverson2937/cowinaddons | 58205012623207696c19b3f558ebfdb929961c3b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from . import cowin_project, cowin_fund, fund_project_rel, cowin_visit, cowin_applicant, cowin_search_visit, cowin_invest_decision_applicant, cowin_invest_decision_committee_summary
| 50 | 174 | 0.865 | 28 | 200 | 5.642857 | 0.571429 | 0.126582 | 0.240506 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005263 | 0.05 | 200 | 3 | 175 | 66.666667 | 0.826316 | 0.105 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
8f46c102ed5056b14db55a10a8ccb84ff098b035 | 96 | py | Python | venv/lib/python3.8/site-packages/poetry/installation/installer.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/poetry/installation/installer.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/poetry/installation/installer.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/4a/bd/9f/068439dd2f4bfdcc984a3953db75cc729021557b0b650803cc63d5f1b8 | 96 | 96 | 0.895833 | 9 | 96 | 9.555556 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.416667 | 0 | 96 | 1 | 96 | 96 | 0.479167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
56b66eb8885fa5715da7f814bb0950a65e96aaff | 6,217 | py | Python | 02.2_Simple_LCA_co_products.py | massimopizzol/B4B | 46ce9d0bb69178fb17714f547d3bb7fc4a23e870 | [
"MIT"
] | 11 | 2019-01-18T09:47:03.000Z | 2021-09-09T10:58:31.000Z | 02.2_Simple_LCA_co_products.py | Su-Ko/B4B | 32f4a3dab4d6ac3e4501a6075bf9b6eda9e704f8 | [
"MIT"
] | 2 | 2020-10-14T20:07:13.000Z | 2020-10-22T09:09:40.000Z | 02.2_Simple_LCA_co_products.py | Su-Ko/B4B | 32f4a3dab4d6ac3e4501a6075bf9b6eda9e704f8 | [
"MIT"
] | 1 | 2021-04-14T16:46:44.000Z | 2021-04-14T16:46:44.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 4 21:03:55 2017
@author: massimo
"""
from brightway2 import *
t_db1 = Database("testdb")
t_db1.write({
("testdb", "Electricity production"):{
'name':'Electricity production',
'unit': 'kWh',
'exchanges': [{
'input': ('testdb', 'Fuel production'),
'amount': 2,
'unit': 'kg',
'type': 'technosphere'
},{
'input': ('testdb', 'Carbon dioxide'),
'amount': 1,
'unit': 'kg',
'type': 'biosphere'
},{
'input': ('testdb', 'Sulphur dioxide'),
'amount': 0.1,
'unit': 'kg',
'type': 'biosphere'
},{
'input': ('testdb', 'Electricity production'), #important to write the same process name in output
'amount': 10,
'unit': 'kWh',
'type': 'production'
},{
'input': ('testdb', 'Heat production'),
'amount': -3,
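            # A negative technosphere input is one way to credit the heat
            # co-product: the avoided heat production is subtracted from the
            # electricity system (system expansion).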
'unit': 'MJ',
'type': 'technosphere'
}]
},
('testdb', 'Fuel production'):{
'name': 'Fuel production',
'unit': 'kg',
'exchanges':[{
'input': ('testdb', 'Carbon dioxide'),
'amount': 10,
'unit': 'kg',
'type': 'biosphere'
},{
'input': ('testdb', 'Sulphur dioxide'),
'amount': 2,
'unit': 'kg',
'type': 'biosphere'
},{
'input': ('testdb', 'Crude oil'),
'amount': -50,
'unit': 'kg',
'type': 'biosphere'
},{
'input': ('testdb', 'Fuel production'),
'amount': 100,
'unit': 'kg',
'type': 'production'
}]
},
('testdb', 'Heat production'):{
'name': 'Heat production',
'unit': 'MJ',
'exchanges':[{
'input': ('testdb', 'Carbon dioxide'),
            'amount': 10000,  # a deliberately exaggerated number
'unit': 'kg',
'type': 'biosphere'
},{
'input': ('testdb', 'Heat production'),
'amount': 3,
'unit': 'MJ',
'type': 'production'
}]
},
('testdb', 'Carbon dioxide'):{'name': 'Carbon dioxide', 'unit':'kg', 'type': 'biosphere'},
('testdb', 'Sulphur dioxide'):{'name': 'Sulphur dioxide', 'unit':'kg', 'type': 'biosphere'},
('testdb', 'Crude oil'):{'name': 'Crude oil', 'unit':'kg', 'type': 'biosphere'}
})
# Or just do like this:
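# Note: t_db2 is a handle to the same "testdb" database, so the write below
# replaces the data written above; to compare the two co-product approaches
# on genuinely separate data, two differently named databases would be needed.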
t_db2 = Database("testdb")
t_db2.write({
("testdb", "Electricity production"):{
'name':'Electricity production',
'unit': 'kWh',
'exchanges': [{
'input': ('testdb', 'Fuel production'),
'amount': 2,
'unit': 'kg',
'type': 'technosphere'
},{
'input': ('testdb', 'Carbon dioxide'),
'amount': 1,
'unit': 'kg',
'type': 'biosphere'
},{
'input': ('testdb', 'Sulphur dioxide'),
'amount': 0.1,
'unit': 'kg',
'type': 'biosphere'
},{
'input': ('testdb', 'Electricity production'), #important to write the same process name in output
'amount': 10,
'unit': 'kWh',
'type': 'production'
},{
'input': ('testdb', 'Heat production'),
'amount': 3,
'unit': 'MJ',
'type': 'substitution'
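            # A 'substitution' exchange is brightway2's explicit way to model
            # an avoided product; it is handled as a negative technosphere input.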
}]
},
('testdb', 'Fuel production'):{
'name': 'Fuel production',
'unit': 'kg',
'exchanges':[{
'input': ('testdb', 'Carbon dioxide'),
'amount': 10,
'unit': 'kg',
'type': 'biosphere'
},{
'input': ('testdb', 'Sulphur dioxide'),
'amount': 2,
'unit': 'kg',
'type': 'biosphere'
},{
'input': ('testdb', 'Crude oil'),
'amount': -50,
'unit': 'kg',
'type': 'biosphere'
},{
'input': ('testdb', 'Fuel production'),
'amount': 100,
'unit': 'kg',
'type': 'production'
}]
},
('testdb', 'Heat production'):{
'name': 'Heat production',
'unit': 'MJ',
'exchanges':[{
'input': ('testdb', 'Carbon dioxide'),
            'amount': 10000,  # a deliberately exaggerated number
'unit': 'kg',
'type': 'biosphere'
},{
'input': ('testdb', 'Heat production'),
'amount': 3,
'unit': 'MJ',
'type': 'production'
}]
},
('testdb', 'Carbon dioxide'):{'name': 'Carbon dioxide', 'unit':'kg', 'type': 'biosphere'},
('testdb', 'Sulphur dioxide'):{'name': 'Sulphur dioxide', 'unit':'kg', 'type': 'biosphere'},
('testdb', 'Crude oil'):{'name': 'Crude oil', 'unit':'kg', 'type': 'biosphere'}
})
# Create an LCIA method.
myLCIAdata = [[('testdb', 'Carbon dioxide'), 2.0],
[('testdb', 'Sulphur dioxide'), 2.0],
[('testdb', 'Crude oil'), 2.0]]
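# Toy method: every elementary flow gets a flat characterisation factor of 2.0.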
method_key = ('simplemethod', 'imaginaryendpoint', 'imaginarymidpoint')
my_method = Method(method_key)
my_method.validate(myLCIAdata)
my_method.register()
my_method.write(myLCIAdata)
my_method.load()
# Compare the two
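# Because both writes targeted the same "testdb", the two LCAs below run on
# identical data, so equal scores are expected here regardless of how the
# co-product was modelled.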
functional_unit1 = {t_db1.get("Electricity production") : 1000}
lca1 = LCA(functional_unit1, method_key)
lca1.lci()
lca1.lcia()
print(lca1.inventory)
print(lca1.score)
functional_unit2 = {t_db2.get("Electricity production") : 1000}
lca2 = LCA(functional_unit2, method_key)
lca2.lci()
lca2.lcia()
print(lca2.inventory)
print(lca2.score)
lca1.score == lca2.score
| 30.326829 | 114 | 0.421103 | 495 | 6,217 | 5.250505 | 0.189899 | 0.055406 | 0.084648 | 0.131589 | 0.740285 | 0.740285 | 0.740285 | 0.740285 | 0.740285 | 0.740285 | 0 | 0.024189 | 0.394885 | 6,217 | 204 | 115 | 30.47549 | 0.666667 | 0.04552 | 0 | 0.8 | 0 | 0 | 0.334854 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.005714 | 0 | 0.005714 | 0.022857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
56d730c54b677aca8679ba2c632bc1fe675c1c15 | 33 | py | Python | arduino_yun/samples/__init__.py | abhirocks1211/countly-sdk-iot-python | 0ccc5120661c5e356d6a569b31ba5fb135fa8efb | [
"MIT"
] | 9 | 2016-04-06T05:23:43.000Z | 2022-02-21T04:41:47.000Z | arduino_yun/samples/__init__.py | abhirocks1211/countly-sdk-iot-python | 0ccc5120661c5e356d6a569b31ba5fb135fa8efb | [
"MIT"
] | 7 | 2016-01-07T22:09:48.000Z | 2016-02-16T12:44:09.000Z | arduino_yun/samples/__init__.py | abhirocks1211/countly-sdk-iot-python | 0ccc5120661c5e356d6a569b31ba5fb135fa8efb | [
"MIT"
] | 11 | 2016-03-17T14:03:44.000Z | 2022-02-28T05:32:03.000Z | from arduino_yun.samples import * | 33 | 33 | 0.848485 | 5 | 33 | 5.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 33 | 1 | 33 | 33 | 0.9 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
711c896d1fd7c39f285bc451c47149f4f3aa9cf7 | 401 | py | Python | Deep_Learning_Nanodegree_Program/05_Teach_a_Quadcopter_How_to_Fly/quad_controller_rl/src/quad_controller_rl/agents/__init__.py | cilsya/udacity | 056c7905b108ab140237a783a0203340256a3ac2 | [
"MIT"
] | 1 | 2018-10-31T17:18:28.000Z | 2018-10-31T17:18:28.000Z | Deep_Learning_Nanodegree_Program/05_Teach_a_Quadcopter_How_to_Fly/quad_controller_rl/src/quad_controller_rl/agents/__init__.py | cilsya/udacity | 056c7905b108ab140237a783a0203340256a3ac2 | [
"MIT"
] | null | null | null | Deep_Learning_Nanodegree_Program/05_Teach_a_Quadcopter_How_to_Fly/quad_controller_rl/src/quad_controller_rl/agents/__init__.py | cilsya/udacity | 056c7905b108ab140237a783a0203340256a3ac2 | [
"MIT"
] | null | null | null | from quad_controller_rl.agents.base_agent import BaseAgent
from quad_controller_rl.agents.policy_search import RandomPolicySearch
from quad_controller_rl.agents.task01_ddpg_agent import Task01_DDPG
from quad_controller_rl.agents.task02_ddpg_agent import Task02_DDPG
from quad_controller_rl.agents.task03_ddpg_agent import Task03_DDPG
from quad_controller_rl.agents.task04_ddpg_agent import Task04_DDPG | 66.833333 | 70 | 0.912718 | 62 | 401 | 5.483871 | 0.274194 | 0.141176 | 0.317647 | 0.352941 | 0.494118 | 0.264706 | 0 | 0 | 0 | 0 | 0 | 0.042328 | 0.057357 | 401 | 6 | 71 | 66.833333 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
856e9c972265d3150e5d7836911b29b63ce3cde4 | 92 | py | Python | pruebas/unit_example/test_unit.py | christophermontero/estima-tu-proyecto | 19f533be203c9ac2c4383ded5a1664dd1d05d679 | [
"MIT"
] | 2 | 2021-05-29T16:57:17.000Z | 2021-06-13T18:39:24.000Z | pruebas/unit_example/test_unit.py | christophermontero/estima-tu-proyecto | 19f533be203c9ac2c4383ded5a1664dd1d05d679 | [
"MIT"
] | 22 | 2021-05-22T18:23:40.000Z | 2021-12-18T21:09:59.000Z | pruebas/unit_example/test_unit.py | christophermontero/estima-tu-proyecto | 19f533be203c9ac2c4383ded5a1664dd1d05d679 | [
"MIT"
] | null | null | null | def crear_estudio(x):
    return x + 1


def test_ajiaco():
assert crear_estudio(4) == 5 | 15.333333 | 32 | 0.652174 | 15 | 92 | 3.8 | 0.733333 | 0.421053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042254 | 0.228261 | 92 | 6 | 32 | 15.333333 | 0.760563 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.5 | false | 0 | 0 | 0.25 | 0.75 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
858b70eb3e794d2b278bbe2ceb16250526d503aa | 84 | py | Python | py_tdlib/constructors/notification_settings_scope_group_chats.py | Mr-TelegramBot/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 24 | 2018-10-05T13:04:30.000Z | 2020-05-12T08:45:34.000Z | py_tdlib/constructors/notification_settings_scope_group_chats.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 3 | 2019-06-26T07:20:20.000Z | 2021-05-24T13:06:56.000Z | py_tdlib/constructors/notification_settings_scope_group_chats.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 5 | 2018-10-05T14:29:28.000Z | 2020-08-11T15:04:10.000Z | from ..factory import Type
class notificationSettingsScopeGroupChats(Type):
pass
| 14 | 48 | 0.821429 | 8 | 84 | 8.625 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.119048 | 84 | 5 | 49 | 16.8 | 0.932432 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
85c0457a08f3f94adab7782f57bfd2672f09a58c | 148 | py | Python | Programming Languages & Libraries/Python/Python Complete Bootcamp/Modules and Packages/myprogram.py | ttotoc/codebook | 2085e2e29cad9510ba9017e0a760cd0d2d4a734e | [
"MIT"
] | 3 | 2020-06-01T04:17:18.000Z | 2020-12-18T03:05:55.000Z | Programming Languages & Libraries/Python/Python Complete Bootcamp/Modules and Packages/myprogram.py | ttotoc/codebook | 2085e2e29cad9510ba9017e0a760cd0d2d4a734e | [
"MIT"
] | 1 | 2020-04-25T08:01:59.000Z | 2020-04-25T08:01:59.000Z | Programming Languages & Libraries/Python/Python Complete Bootcamp/Modules and Packages/myprogram.py | ttotoc/codebook | 2085e2e29cad9510ba9017e0a760cd0d2d4a734e | [
"MIT"
] | 7 | 2020-04-26T10:02:36.000Z | 2021-06-08T05:12:46.000Z | from MyMainPackage import some_main_script
from MyMainPackage.SubPackage import mysubscript
some_main_script.report_main()
mysubscript.sub_report() | 29.6 | 48 | 0.885135 | 19 | 148 | 6.578947 | 0.526316 | 0.272 | 0.224 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.067568 | 148 | 5 | 49 | 29.6 | 0.905797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
a4575d3161bf14fe195556c827659168de9308ca | 130 | py | Python | instaclone/tests.py | Machanga/instaclone | 8c06b35e60b7d68928c0d26c8e8a2db75111882b | [
"MIT"
] | null | null | null | instaclone/tests.py | Machanga/instaclone | 8c06b35e60b7d68928c0d26c8e8a2db75111882b | [
"MIT"
] | 5 | 2020-06-05T21:59:01.000Z | 2021-09-08T01:10:20.000Z | instaclone/tests.py | Machanga/instaclone | 8c06b35e60b7d68928c0d26c8e8a2db75111882b | [
"MIT"
] | 1 | 2020-11-04T08:39:44.000Z | 2020-11-04T08:39:44.000Z | from django.test import TestCase
# Create your tests here.
from django.test import TestCase
from .models import Image, Profile
| 16.25 | 34 | 0.792308 | 19 | 130 | 5.421053 | 0.631579 | 0.194175 | 0.271845 | 0.38835 | 0.543689 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.161538 | 130 | 7 | 35 | 18.571429 | 0.944954 | 0.176923 | 0 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
a45963ceeb50ae46a530e825003d4698944008c4 | 43 | py | Python | arizona/keyword_spotting/learners/__init__.py | phanxuanphucnd/arizona-spotting | 97895f0e7b721fd58b67d187f6421c0c932ab0b3 | [
"MIT"
] | 2 | 2021-06-16T14:24:19.000Z | 2021-11-23T16:44:58.000Z | arizona/keyword_spotting/learners/__init__.py | phanxuanphucnd/August | 7d60cedbe3feedd8accb7e345cfff29520410ad3 | [
"MIT"
] | null | null | null | arizona/keyword_spotting/learners/__init__.py | phanxuanphucnd/August | 7d60cedbe3feedd8accb7e345cfff29520410ad3 | [
"MIT"
] | null | null | null | from .wav2kws_learner import Wav2KWSLearner | 43 | 43 | 0.906977 | 5 | 43 | 7.6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.05 | 0.069767 | 43 | 1 | 43 | 43 | 0.9 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
a4813c8330bf1f84a86190b44320a74ae7eb86f1 | 34 | py | Python | src/processing/utils.py | WagnerFLL/Cartola | 1582bc239d4f8694c9e0b96bbe4e1f945ada9073 | [
"MIT"
] | null | null | null | src/processing/utils.py | WagnerFLL/Cartola | 1582bc239d4f8694c9e0b96bbe4e1f945ada9073 | [
"MIT"
] | null | null | null | src/processing/utils.py | WagnerFLL/Cartola | 1582bc239d4f8694c9e0b96bbe4e1f945ada9073 | [
"MIT"
] | null | null | null | def f():
print("Hello World!") | 17 | 25 | 0.558824 | 5 | 34 | 3.8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.205882 | 34 | 2 | 25 | 17 | 0.703704 | 0 | 0 | 0 | 0 | 0 | 0.342857 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0 | 0 | 0 | 0.5 | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
f103c7a099ffd9e47eb22224e0b257d5d199fee2 | 3,223 | py | Python | POLSKI_SPOJ/Python/PROGC03.py | janskwr/SPOJ-solutions | e561eba4c363ad4ad0637ff38b05e50d95c001f5 | [
"MIT"
] | null | null | null | POLSKI_SPOJ/Python/PROGC03.py | janskwr/SPOJ-solutions | e561eba4c363ad4ad0637ff38b05e50d95c001f5 | [
"MIT"
] | null | null | null | POLSKI_SPOJ/Python/PROGC03.py | janskwr/SPOJ-solutions | e561eba4c363ad4ad0637ff38b05e50d95c001f5 | [
"MIT"
] | null | null | null | import sys
Stack = [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]]
Queue = [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]]
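# Each slot holds one structure: index 0 stores the slot id, an 'active'
# marker at index 1 means the structure exists, and up to 10 payload items
# follow it (len == 12 means full; 'active' still last means empty).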
for line in sys.stdin:
    command = line.split()
if command[0] == 'new_s':
Stack[int(command[1])].append('active')
elif command[0] == 'push':
if len(Stack[int(command[1])]) == 12:
print('error: stack is full')
else:
Stack[int(command[1])].append(command[2])
elif command[0] == 'pop':
if Stack[int(command[1])][-1] == 'active':
print('error: stack is empty')
else:
Stack[int(command[1])].pop()
elif command[0] == 'stack->stack':
if len(Stack[int(command[1])]) == 2:
print('error: wrong command')
elif len(Stack[int(command[2])]) == 12:
print('error: wrong command')
else:
Stack[int(command[2])].append(Stack[int(command[1])][-1])
Stack[int(command[1])].pop()
elif command[0] == 'delete_s':
Stack[int(command[1])].clear()
Stack[int(command[1])].append(int(command[1]))
elif command[0] == 'print_s':
if Stack[int(command[1])][-1] == 'active':
print('empty')
elif Stack[int(command[1])][1] == 'active' and Stack[int(command[1])][-1] != 'active':
print(*Stack[int(command[1])][2:])
elif command[0] == 'new_q':
Queue[int(command[1])].append('active')
elif command[0] == 'enqueue':
if len(Queue[int(command[1])]) == 12:
print('error: queue is full')
else:
Queue[int(command[1])].insert(2, command[2])
elif command[0] == 'dequeue':
if Queue[int(command[1])][-1] == 'active':
print('error: queue is empty')
else:
Queue[int(command[1])].pop()
elif command[0] == 'delete_q':
Queue[int(command[1])].clear()
Queue[int(command[1])].append(int(command[1]))
elif command[0] == 'print_q':
if Queue[int(command[1])][-1] == 'active':
print('empty')
elif Queue[int(command[1])][1] == 'active' and Queue[int(command[1])][-1] != 'active':
print(*Queue[int(command[1])][2:])
elif command[0] == 'stack->queue':
if len(Stack[int(command[1])]) == 2:
print('error: wrong command')
elif len(Queue[int(command[2])]) == 12:
print('error: wrong command')
else:
Queue[int(command[2])].insert(2, Stack[int(command[1])][-1])
Stack[int(command[1])].pop()
elif command[0] == 'queue->queue':
if len(Queue[int(command[1])]) == 2:
print('error: wrong command')
elif len(Queue[int(command[2])]) == 12:
print('error: wrong command')
else:
Queue[int(command[2])].insert(2, Queue[int(command[1])][-1])
Queue[int(command[1])].pop()
elif command[0] == 'queue->stack':
if len(Queue[int(command[1])]) == 2:
print('error: wrong command')
elif len(Stack[int(command[2])]) == 12:
print('error: wrong command')
else:
Stack[int(command[2])].append(Queue[int(command[1])][-1])
Queue[int(command[1])].pop()
| 40.2875 | 94 | 0.508222 | 426 | 3,223 | 3.830986 | 0.107981 | 0.269608 | 0.242647 | 0.166667 | 0.884804 | 0.786765 | 0.714461 | 0.644608 | 0.458333 | 0.458333 | 0 | 0.048065 | 0.270555 | 3,223 | 79 | 95 | 40.797468 | 0.646108 | 0 | 0 | 0.467532 | 0 | 0 | 0.130624 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.012987 | 0 | 0.012987 | 0.233766 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
f15b222a6961703827f123007274a1e0a3091503 | 197 | py | Python | tests/app_test/admin.py | marcosschroh/django-history-actions | fc29eee29ed4f6ba71a366783fefdbe223cbed21 | [
"MIT"
] | 1 | 2018-09-11T18:35:42.000Z | 2018-09-11T18:35:42.000Z | tests/app_test/admin.py | marcosschroh/django-history-actions | fc29eee29ed4f6ba71a366783fefdbe223cbed21 | [
"MIT"
] | null | null | null | tests/app_test/admin.py | marcosschroh/django-history-actions | fc29eee29ed4f6ba71a366783fefdbe223cbed21 | [
"MIT"
] | null | null | null | from django.contrib import admin
from tests.app_test import models
admin.site.register(models.Profile)
admin.site.register(models.SuperProfile)
admin.site.register(models.ProfilePostSaveSignal)
| 21.888889 | 49 | 0.84264 | 26 | 197 | 6.346154 | 0.538462 | 0.163636 | 0.309091 | 0.418182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.071066 | 197 | 8 | 50 | 24.625 | 0.901639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.4 | 0 | 0.4 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
2d120e1e33b84475c311e860239e030c1188a29f | 34 | py | Python | mcdc_tnt/numba_kernels/cuda/__init__.py | jpmorgan98/MCDC-TNT | a7772b169eb431c54e729feff4128545a735c7c2 | [
"BSD-3-Clause"
] | null | null | null | mcdc_tnt/numba_kernels/cuda/__init__.py | jpmorgan98/MCDC-TNT | a7772b169eb431c54e729feff4128545a735c7c2 | [
"BSD-3-Clause"
] | null | null | null | mcdc_tnt/numba_kernels/cuda/__init__.py | jpmorgan98/MCDC-TNT | a7772b169eb431c54e729feff4128545a735c7c2 | [
"BSD-3-Clause"
] | null | null | null | from .advance_cuda import Advance
| 17 | 33 | 0.852941 | 5 | 34 | 5.6 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 34 | 1 | 34 | 34 | 0.933333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7773ca915a870917892234f9666cddc4ae119c99 | 26,958 | py | Python | serial_scripts/k8s_scripts/test_isolation.py | vkolli/5.0_contrail-test | 1793f169a94100400a1b2fafbad21daf5aa4d48a | [
"Apache-2.0"
] | null | null | null | serial_scripts/k8s_scripts/test_isolation.py | vkolli/5.0_contrail-test | 1793f169a94100400a1b2fafbad21daf5aa4d48a | [
"Apache-2.0"
] | 1 | 2021-06-01T22:18:29.000Z | 2021-06-01T22:18:29.000Z | serial_scripts/k8s_scripts/test_isolation.py | vkolli/5.0_contrail-test | 1793f169a94100400a1b2fafbad21daf5aa4d48a | [
"Apache-2.0"
] | null | null | null | from common.k8s.base import BaseK8sTest
from tcutils.wrappers import preposttest_wrapper
from time import sleep
from tcutils.util import get_random_name
from tcutils.contrail_status_check import ContrailStatusChecker
import test
from tcutils.util import skip_because
from vn_test import VNFixture
class TestNSIsolationSerial(BaseK8sTest):
@classmethod
def setUpClass(cls):
super(TestNSIsolationSerial, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestNSIsolationSerial, cls).tearDownClass()
def setup_common_namespaces_pods(self, prov_service = False, prov_ingress = False):
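        # Topology: ns1 and ns2 are isolated namespaces while ns3 is a regular
        # one; each namespace gets two nginx server pods and one busybox client
        # pod, plus optional Service and Ingress objects.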
service_ns1, ingress_ns1 = None, None
service_ns2, ingress_ns2 = None, None
service_ns3, ingress_ns3 = None, None
namespace1_name = get_random_name("ns1")
namespace2_name = get_random_name("ns2")
namespace3_name = get_random_name("ns3")
namespace1 = self.setup_namespace(name = namespace1_name, isolation = True)
namespace2 = self.setup_namespace(name = namespace2_name, isolation = True)
namespace3 = self.setup_namespace(name = namespace3_name)
assert namespace1.verify_on_setup()
assert namespace2.verify_on_setup()
assert namespace3.verify_on_setup()
ns_1_label = "namespace1"
ns_2_label = "namespace2"
ns_3_label = "namespace3"
client1_ns1 = self.setup_nginx_pod(namespace=namespace1_name,
labels={'app': ns_1_label})
client2_ns1 = self.setup_nginx_pod(namespace=namespace1_name,
labels={'app': ns_1_label})
client3_ns1 = self.setup_busybox_pod(namespace=namespace1_name)
client1_ns2 = self.setup_nginx_pod(namespace=namespace2_name,
labels={'app': ns_2_label})
client2_ns2 = self.setup_nginx_pod(namespace=namespace2_name,
labels={'app': ns_2_label})
client3_ns2 = self.setup_busybox_pod(namespace=namespace2_name)
client1_ns3 = self.setup_nginx_pod(namespace=namespace3_name,
labels={'app': ns_3_label})
client2_ns3 = self.setup_nginx_pod(namespace=namespace3_name,
labels={'app': ns_3_label})
client3_ns3 = self.setup_busybox_pod(namespace=namespace3_name)
assert self.verify_nginx_pod(client1_ns1)
assert self.verify_nginx_pod(client2_ns1)
assert client3_ns1.verify_on_setup()
assert self.verify_nginx_pod(client1_ns2)
assert self.verify_nginx_pod(client2_ns2)
assert client3_ns2.verify_on_setup()
assert self.verify_nginx_pod(client1_ns3)
assert self.verify_nginx_pod(client2_ns3)
assert client3_ns3.verify_on_setup()
if prov_service == True:
service_ns1 = self.setup_http_service(namespace=namespace1.name,
labels={'app': ns_1_label})
service_ns2 = self.setup_http_service(namespace=namespace2.name,
labels={'app': ns_2_label})
service_ns3 = self.setup_http_service(namespace=namespace3.name,
labels={'app': ns_3_label})
if prov_ingress == True:
ingress_ns1 = self.setup_simple_nginx_ingress(service_ns1.name,
namespace=namespace1.name)
ingress_ns3 = self.setup_simple_nginx_ingress(service_ns3.name,
namespace=namespace3.name)
assert ingress_ns1.verify_on_setup()
assert ingress_ns3.verify_on_setup()
client1 = [client1_ns1, client2_ns1, client3_ns1, service_ns1,\
namespace1, ingress_ns1]
client2 = [client1_ns2, client2_ns2, client3_ns2, service_ns2,\
namespace2]
client3 = [client1_ns3, client2_ns3, client3_ns3, service_ns3,\
namespace3, ingress_ns3]
return (client1, client2, client3)
#end setup_common_namespaces_pods
@test.attr(type=['openshift_1'])
@preposttest_wrapper
def test_pods_isolation_post_kube_manager_restart(self):
"""
        This test case verifies the connectivity between pods of different namespaces
        with namespace isolation enabled, post restart of contrail-kube-manager.
        Verify:
        1. Pods in other namespaces in the Kubernetes cluster will NOT be able to reach pods in the isolated namespace.
        2. Pods created in an isolated namespace also cannot reach pods in other namespaces directly.
        Restart contrail-kube-manager and verify both points again.
"""
client1, client2, client3 = self.setup_common_namespaces_pods()
#Check 1:
assert client1[2].ping_to_ip(client2[0].pod_ip, expectation=False)
assert client3[2].ping_to_ip(client2[0].pod_ip, expectation=False)
#Check 2
assert client1[2].ping_to_ip(client3[0].pod_ip, expectation=False)
self.restart_kube_manager()
#Check 1:
assert client1[2].ping_to_ip(client2[0].pod_ip, expectation=False)
assert client3[2].ping_to_ip(client2[0].pod_ip, expectation=False)
#Check 2
assert client1[2].ping_to_ip(client3[0].pod_ip, expectation=False)
#end test_pods_isolation_post_kube_manager_restart
@test.attr(type=['k8s_sanity','openshift_1'])
@preposttest_wrapper
def test_service_isolation_post_kube_manager_restart(self):
"""
        This test case verifies the connectivity between pods and services of different
        namespaces with namespace isolation enabled, post restart of contrail-kube-manager.
        Verify:
        1. Pods in an isolated namespace can reach Services in non-isolated namespaces, but not Services in other isolated namespaces.
        2. Pods in an isolated namespace cannot be reached from pods in other namespaces through the Kubernetes Service IP.
        Restart contrail-kube-manager and verify both points again.
"""
client1, client2, client3 = self.setup_common_namespaces_pods(prov_service = True)
#Check 1:
assert self.validate_nginx_lb([client3[0], client3[1]], client3[3].cluster_ip,
test_pod=client1[2])
assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
test_pod=client1[2], expectation=False)
#Check 2:
assert self.validate_nginx_lb([client1[0], client1[1]], client1[3].cluster_ip,
test_pod=client3[2], expectation=False)
self.restart_kube_manager()
#Check 1:
assert self.validate_nginx_lb([client3[0], client3[1]], client3[3].cluster_ip,
test_pod=client1[2])
assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
test_pod=client1[2], expectation=False)
#Check 2:
assert self.validate_nginx_lb([client1[0], client1[1]], client1[3].cluster_ip,
test_pod=client3[2], expectation=False)
#end test_service_isolation_post_kube_manager_restart
@skip_because(mx_gw = False)
@preposttest_wrapper
def test_ingress_isolation_post_kube_manager_restart(self):
"""
        This test case verifies ingress operations post restart of contrail-kube-manager.
        Verify:
        1. A k8s Ingress in an isolated namespace is accessible from the external world.
        2. A k8s Ingress in a non-isolated namespace is accessible from the external world.
        Restart contrail-kube-manager and verify both points again.
"""
client1, client2, client3 = self.setup_common_namespaces_pods(prov_service = True,
prov_ingress = True)
assert self.validate_nginx_lb([client1[0], client1[1]], client1[5].external_ips[0])
assert self.validate_nginx_lb([client3[0], client3[1]], client3[5].external_ips[0])
self.restart_kube_manager()
assert self.validate_nginx_lb([client1[0], client1[1]], client1[5].external_ips[0])
assert self.validate_nginx_lb([client3[0], client3[1]], client3[5].external_ips[0])
#end test_ingress_isolation_post_kube_manager_restart
@skip_because(mx_gw = False)
@preposttest_wrapper
def test_ingress_isolation_vrouter_agent_restart(self):
"""
        This test case verifies ingress operations post restart of the vrouter-agent.
        Verify:
        1. A k8s Ingress in an isolated namespace is accessible from the external world.
        2. A k8s Ingress in a non-isolated namespace is accessible from the external world.
        Restart the vrouter-agent and verify both points again.
"""
client1, client2, client3 = self.setup_common_namespaces_pods(prov_service = True,
prov_ingress = True)
assert self.validate_nginx_lb([client1[0], client1[1]], client1[5].external_ips[0])
assert self.validate_nginx_lb([client3[0], client3[1]], client3[5].external_ips[0])
self.restart_vrouter_agent()
assert self.validate_nginx_lb([client1[0], client1[1]], client1[5].external_ips[0])
assert self.validate_nginx_lb([client3[0], client3[1]], client3[5].external_ips[0])
#end test_ingress_isolation_vrouter_agent_restart
class TestCustomIsolationSerial(BaseK8sTest):
@classmethod
def setUpClass(cls):
super(TestCustomIsolationSerial, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestCustomIsolationSerial, cls).tearDownClass()
def setup_common_namespaces_pods(self, prov_service = False):
service_ns1 = None
service_ns2 = None
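        # Topology: ns1 is a regular namespace while ns2 is mapped to the
        # custom virtual network "TestVNNamespace"; selected pods in both
        # namespaces are additionally placed on the custom pod network
        # "TestVNPod".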
vn_for_namespace = self.setup_vn(vn_name = "TestVNNamespace")
vn_dict_for_namespace = {"domain": vn_for_namespace.domain_name,
"project" : vn_for_namespace.project_name[0],
"name": vn_for_namespace.vn_name}
vn_for_pod = self.setup_vn(vn_name = "TestVNPod")
vn_dict_for_pod = {"domain": vn_for_pod.domain_name,
"project" : vn_for_pod.project_name[0],
"name": vn_for_pod.vn_name}
namespace1_name = get_random_name("ns1")
namespace2_name = get_random_name("ns2")
namespace1 = self.setup_namespace(name = namespace1_name)
namespace2 = self.setup_namespace(name = namespace2_name, custom_isolation = True,
fq_network_name= vn_dict_for_namespace)
assert namespace1.verify_on_setup()
assert namespace2.verify_on_setup()
ns_1_label = "namespace1"
ns_2_label = "namespace2"
client1_ns1 = self.setup_nginx_pod(namespace=namespace1_name,
labels={'app': ns_1_label})
client2_ns1 = self.setup_nginx_pod(namespace=namespace1_name,
labels={'app': ns_1_label})
client3_ns1 = self.setup_busybox_pod(namespace=namespace1_name)
client4_ns1 = self.setup_busybox_pod(namespace=namespace1_name,
custom_isolation = True,
fq_network_name= vn_dict_for_pod)
client5_ns1 = self.setup_busybox_pod(namespace=namespace1_name,
custom_isolation = True,
fq_network_name= vn_dict_for_pod)
client1_ns2 = self.setup_nginx_pod(namespace=namespace2_name,
labels={'app': ns_2_label})
client2_ns2 = self.setup_nginx_pod(namespace=namespace2_name,
labels={'app': ns_2_label})
client3_ns2 = self.setup_busybox_pod(namespace=namespace2_name)
client4_ns2 = self.setup_busybox_pod(namespace=namespace2_name,
custom_isolation = True,
fq_network_name= vn_dict_for_pod)
assert self.verify_nginx_pod(client1_ns1)
assert self.verify_nginx_pod(client2_ns1)
assert client3_ns1.verify_on_setup()
assert client4_ns1.verify_on_setup()
assert client5_ns1.verify_on_setup()
assert self.verify_nginx_pod(client1_ns2)
assert self.verify_nginx_pod(client2_ns2)
assert client3_ns2.verify_on_setup()
assert client4_ns2.verify_on_setup()
if prov_service == True:
service_ns1 = self.setup_http_service(namespace=namespace1.name,
labels={'app': ns_1_label})
service_ns2 = self.setup_http_service(namespace=namespace2.name,
labels={'app': ns_2_label})
client1 = [client1_ns1, client2_ns1, client3_ns1, service_ns1,\
namespace1, client4_ns1, client5_ns1]
client2 = [client1_ns2, client2_ns2, client3_ns2, service_ns2,\
namespace2, client4_ns2, vn_for_namespace]
return (client1, client2)
#end setup_common_namespaces_pods
@test.attr(type=['k8s_sanity','openshift_1'])
@preposttest_wrapper
def test_pods_custom_isolation_post_kube_manager_restart(self):
"""
        Verify that after a restart of contrail-kube-manager, pod reachability to
        and from the custom isolated namespace/pod is not affected.
        Steps:
        1. Verify reachability between pods and namespaces
        2. Restart contrail-kube-manager
        3. Verify reachability between pods and namespaces again
"""
client1, client2 = self.setup_common_namespaces_pods()
assert client1[5].ping_to_ip(client1[0].pod_ip, expectation=False)
assert client1[5].ping_to_ip(client2[0].pod_ip, expectation=False)
assert client1[5].ping_to_ip(client1[6].pod_ip)
assert client1[5].ping_to_ip(client2[5].pod_ip)
assert client2[2].ping_to_ip(client2[0].pod_ip)
assert client2[2].ping_to_ip(client2[5].pod_ip, expectation=False)
assert client2[5].ping_to_ip(client1[2].pod_ip, expectation=False)
assert client2[5].ping_to_ip(client1[5].pod_ip)
self.restart_kube_manager()
assert client1[5].ping_to_ip(client1[0].pod_ip, expectation=False)
assert client1[5].ping_to_ip(client2[0].pod_ip, expectation=False)
assert client1[5].ping_to_ip(client1[6].pod_ip)
assert client1[5].ping_to_ip(client2[5].pod_ip)
assert client2[2].ping_to_ip(client2[0].pod_ip)
assert client2[2].ping_to_ip(client2[5].pod_ip, expectation=False)
assert client2[5].ping_to_ip(client1[2].pod_ip, expectation=False)
assert client2[5].ping_to_ip(client1[5].pod_ip)
#end test_pods_custom_isolation_post_kube_manager_restart
@test.attr(type=['k8s_sanity','openshift_1'])
@preposttest_wrapper
def test_services_custom_isolation_post_kube_manager_restart(self):
"""
        Verify that after a restart of contrail-kube-manager, service reachability to
        and from the custom isolated namespace/pod is not affected.
        Steps:
        1. Verify reachability between pods and services
        2. Restart contrail-kube-manager
        3. Verify reachability between pods and services again
"""
client1, client2 = self.setup_common_namespaces_pods(prov_service = True)
policy_name='allow-btw-custom-ns-and-service'
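        # The policy below links the custom namespace VN to the default k8s
        # service network so that cluster Service IPs become reachable from
        # the custom-isolated pods.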
k8s_default_service_vn_name = "k8s-default-service-network"
k8s_default_service_vn_fq_name = self.inputs.project_fq_name + \
[k8s_default_service_vn_name]
k8s_default_service_vn_obj = self.vnc_lib.virtual_network_read(
fq_name = k8s_default_service_vn_fq_name)
k8s_service_vn_fixt = VNFixture(connections = self.connections,
vn_name = k8s_default_service_vn_name,
option="contrail",
uuid = k8s_default_service_vn_obj.uuid)
k8s_service_vn_fixt.setUp()
vn_service_policy = self.setup_policy_between_vns(client2[6],
k8s_service_vn_fixt,
api="contrail")
assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
test_pod=client2[2])
assert self.validate_nginx_lb([client1[0], client1[1]], client1[3].cluster_ip,
test_pod=client2[2])
assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
test_pod=client1[2])
self.restart_kube_manager()
assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
test_pod=client2[2])
assert self.validate_nginx_lb([client1[0], client1[1]], client1[3].cluster_ip,
test_pod=client2[2])
assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
test_pod=client1[2])
#end test_services_custom_isolation_post_kube_manager_restart
class TestProjectIsolationSerial(BaseK8sTest):
@classmethod
def setUpClass(cls):
super(TestProjectIsolationSerial, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestProjectIsolationSerial, cls).tearDownClass()
def setup_common_namespaces_pods(self, prov_service = False,
prov_ingress = False,
isolation = False):
operation = self.modify_cluster_project()
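        # modify_cluster_project() toggles the cluster-project mapping: "reset"
        # appears to restore per-namespace Contrail projects (project isolation
        # asserted below); otherwise namespaces share a common project.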
service_ns1, ingress_ns1 = None, None
service_ns2, ingress_ns2 = None, None
namespace1_name = get_random_name("ns1")
namespace2_name = get_random_name("ns2")
namespace1 = self.setup_namespace(name = namespace1_name)
namespace2 = self.setup_namespace(name = namespace2_name, isolation = isolation)
assert namespace1.verify_on_setup()
assert namespace2.verify_on_setup()
if operation=="reset":
assert namespace1.project_isolation
assert namespace2.project_isolation
else:
assert (namespace1.project_isolation == False)
assert (namespace2.project_isolation == False)
ns_1_label = "namespace1"
ns_2_label = "namespace2"
client1_ns1 = self.setup_nginx_pod(namespace=namespace1_name,
labels={'app': ns_1_label})
client2_ns1 = self.setup_nginx_pod(namespace=namespace1_name,
labels={'app': ns_1_label})
client3_ns1 = self.setup_busybox_pod(namespace=namespace1_name)
client1_ns2 = self.setup_nginx_pod(namespace=namespace2_name,
labels={'app': ns_2_label})
client2_ns2 = self.setup_nginx_pod(namespace=namespace2_name,
labels={'app': ns_2_label})
client3_ns2 = self.setup_busybox_pod(namespace=namespace2_name)
assert self.verify_nginx_pod(client1_ns1)
assert self.verify_nginx_pod(client2_ns1)
assert client3_ns1.verify_on_setup()
assert self.verify_nginx_pod(client1_ns2)
assert self.verify_nginx_pod(client2_ns2)
assert client3_ns2.verify_on_setup()
if prov_service == True:
service_ns1 = self.setup_http_service(namespace=namespace1.name,
labels={'app': ns_1_label})
type = "LoadBalancer" if prov_ingress == False else None
service_ns2 = self.setup_http_service(namespace=namespace2.name,
labels={'app': ns_2_label},
type=type)
if prov_ingress == True:
ingress_ns1 = self.setup_simple_nginx_ingress(service_ns1.name,
namespace=namespace1.name)
ingress_ns2 = self.setup_simple_nginx_ingress(service_ns2.name,
namespace=namespace2.name)
assert ingress_ns1.verify_on_setup()
assert ingress_ns2.verify_on_setup()
client1 = [client1_ns1, client2_ns1, client3_ns1, service_ns1,\
namespace1, ingress_ns1]
client2 = [client1_ns2, client2_ns2, client3_ns2, service_ns2,\
namespace2, ingress_ns2]
return (client1, client2)
#end setup_common_namespaces_pods
@test.attr(type=['openshift_1'])
@preposttest_wrapper
def test_pod_reachability_across_projects(self):
"""
Check reachability of Pods of different namespaces across different projects
"""
client1, client2 = self.setup_common_namespaces_pods()
assert client1[2].ping_to_ip(client1[0].pod_ip)
assert client1[2].ping_to_ip(client2[0].pod_ip)
assert client2[2].ping_to_ip(client1[0].pod_ip)
    # end test_pod_reachability_across_projects
@skip_because(mx_gw = False)
@preposttest_wrapper
def test_service_reachability_across_projects(self):
"""
        Check reachability of Services of different namespaces across different projects
"""
client1, client2 = self.setup_common_namespaces_pods(prov_service = True)
# Service reachability within namespace/project
assert self.validate_nginx_lb([client1[0], client1[1]], client1[3].cluster_ip,
test_pod=client1[2])
assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
test_pod=client2[2])
# Service reachability across namespace/project
assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
test_pod=client1[2])
assert self.validate_nginx_lb([client1[0], client1[1]], client1[3].cluster_ip,
test_pod=client2[2])
#External connectivity check
assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].external_ips[0])
    # end test_service_reachability_across_projects
@skip_because(mx_gw = False)
@preposttest_wrapper
def test_ingress_reachability_across_projects(self):
"""
Check reachability of Ingress created in project namespace
"""
client1, client2 = self.setup_common_namespaces_pods(prov_service=True,
prov_ingress=True)
# Ingress reachability within namespace/project
assert self.validate_nginx_lb([client1[0], client1[1]], client1[5].external_ips[0])
# Ingress reachability across namespace/project
assert self.validate_nginx_lb([client2[0], client2[1]], client2[5].external_ips[0])
# end test_ingress_reachability_across_projects
@test.attr(type=['openshift_1'])
@preposttest_wrapper
def test_reachability_across_projects_with_isolated_namespace(self):
"""
Check reachability between Pods and services created in an isolated namespace.
Note that each namespace should have a separate Project.
1. Create 2 namespaces, one non-isolated and the other isolated.
2. Create Pods and a service under both namespaces.
3. Verify reachability
"""
client1, client2 = self.setup_common_namespaces_pods(prov_service=True,
isolation=True)
# Reachability of Pods
assert client1[2].ping_to_ip(client1[0].pod_ip)
assert client2[2].ping_to_ip(client2[0].pod_ip)
assert client2[2].ping_to_ip(client1[0].pod_ip, expectation=False)
assert client1[2].ping_to_ip(client2[0].pod_ip, expectation=False)
# Reachability of Services
assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
test_pod=client2[2])
assert self.validate_nginx_lb([client1[0], client1[1]], client1[3].cluster_ip,
test_pod=client2[2])
assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
test_pod=client1[2], expectation=False)
# end test_reachability_across_projects_with_isolated_namespace
@test.attr(type=['k8s_sanity'])
@preposttest_wrapper
def test_reachability_across_projects_with_kube_manager_restart(self):
"""
Check reachability between Pods and services after kube manager restart
"""
client1, client2 = self.setup_common_namespaces_pods(prov_service=True)
# Reachability of Pods
assert client1[2].ping_to_ip(client1[0].pod_ip)
assert client1[2].ping_to_ip(client2[0].pod_ip)
assert client2[2].ping_to_ip(client1[0].pod_ip)
# Reachability of Services
assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
test_pod=client2[2])
assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
test_pod=client1[2])
assert self.validate_nginx_lb([client1[0], client1[1]], client1[3].cluster_ip,
test_pod=client2[2])
self.restart_kube_manager()
# Reachability of Pods
assert client1[2].ping_to_ip(client1[0].pod_ip)
assert client1[2].ping_to_ip(client2[0].pod_ip)
assert client2[2].ping_to_ip(client1[0].pod_ip)
# Reachability of Services
assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
test_pod=client2[2])
assert self.validate_nginx_lb([client2[0], client2[1]], client2[3].cluster_ip,
test_pod=client1[2])
assert self.validate_nginx_lb([client1[0], client1[1]], client1[3].cluster_ip,
test_pod=client2[2])
# end test_reachability_across_projects_with_kube_manager_restart
| 54.904277 | 128 | 0.630796 | 3,152 | 26,958 | 5.085977 | 0.057741 | 0.031439 | 0.040422 | 0.05165 | 0.851538 | 0.834009 | 0.806812 | 0.771692 | 0.742748 | 0.725033 | 0 | 0.042613 | 0.286186 | 26,958 | 490 | 129 | 55.016327 | 0.790469 | 0.139588 | 0 | 0.69837 | 0 | 0 | 0.018055 | 0.00256 | 0 | 0 | 0 | 0 | 0.298913 | 1 | 0.054348 | false | 0 | 0.021739 | 0 | 0.092391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
777ab4dd18066039eb5806d735735b81d3a33b5c | 20 | py | Python | test/pathod/protocols/test_websockets.py | 0x7c48/mitmproxy | f9d8f3bae3f4e681d5f4d406b7e06b099e60ecba | [
"MIT"
] | 24,939 | 2015-01-01T17:13:21.000Z | 2022-03-31T17:50:04.000Z | test/pathod/protocols/test_websockets.py | 0x7c48/mitmproxy | f9d8f3bae3f4e681d5f4d406b7e06b099e60ecba | [
"MIT"
] | 3,655 | 2015-01-02T12:31:43.000Z | 2022-03-31T20:24:57.000Z | test/pathod/protocols/test_websockets.py | 0x7c48/mitmproxy | f9d8f3bae3f4e681d5f4d406b7e06b099e60ecba | [
"MIT"
] | 3,712 | 2015-01-06T06:47:06.000Z | 2022-03-31T10:33:27.000Z | # TODO: write tests
| 10 | 19 | 0.7 | 3 | 20 | 4.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.2 | 20 | 1 | 20 | 20 | 0.875 | 0.85 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 1 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
77d68acf173eeadfbe9be3558642d311e94806ce | 45 | py | Python | cvax/transforms/__init__.py | toru34/cvax | 7829ab84aa53da33c61e2f929fb24b6998148d3e | [
"MIT"
] | null | null | null | cvax/transforms/__init__.py | toru34/cvax | 7829ab84aa53da33c61e2f929fb24b6998148d3e | [
"MIT"
] | null | null | null | cvax/transforms/__init__.py | toru34/cvax | 7829ab84aa53da33c61e2f929fb24b6998148d3e | [
"MIT"
] | null | null | null | from cvax.transforms.transforms import Resize | 45 | 45 | 0.888889 | 6 | 45 | 6.666667 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.066667 | 45 | 1 | 45 | 45 | 0.952381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
77dd4f42dd11ddaaf204bdbd3f2447eb45d7c7a2 | 4,712 | py | Python | fermi/bubble.py | maryprimary/frg | e789439f599eb884a6220ae5b471cf610b0c2b2a | [
"MIT"
] | null | null | null | fermi/bubble.py | maryprimary/frg | e789439f599eb884a6220ae5b471cf610b0c2b2a | [
"MIT"
] | 12 | 2021-02-04T06:46:36.000Z | 2021-07-01T00:43:38.000Z | fermi/bubble.py | maryprimary/frg | e789439f599eb884a6220ae5b471cf610b0c2b2a | [
"MIT"
] | null | null | null | """Bubble integrals defined in Eq. (10.112)
"""
import numpy
from basics import Point
from basics.point import middle_point
#pylint: disable=pointless-string-statement
def pi_plus_ec(posi, nega, lamb, qval, disp, ksft, area):
'''Bubble with an energy cutoff as the flow parameter\n
posi are the edges at energy +LAMBDA, nega are the edges at energy -LAMBDA, lamb is LAMBDA\n
disp is the dispersion relation; qval is the momentum shift, which should be wrapped in a Point,\n
ksft is the momentum-addition function; it must handle the mapping back into the first Brillouin zone\n
area is the area of the first Brillouin zone\n
```(10.112) itself already enforces momentum conservation -- k and k-q must satisfy it --```
```and mapping k-q back into the first Brillouin zone takes care of Umklapp processes```
'''
'''
PI^+(n, q) of 10.112 = +LAMBDA (2pi)^-2 beta^-1 Int_{k in k_n} G'(k)G(k - Q)
where the beta^-1 comes from the frequency sum and the (2pi)^-2 from the momentum integral
G(k) = CITA(LAMBDA < abs(disp(k))) / (i*omega - disp(k))
G'(k) = -DELTA(abs(disp(k)) - LAMBDA) / (i*omega - disp(k))
At zero temperature the frequency part of 10.112 can be summed out; from here on k carries no frequency
= +LAMBDA (2pi)^-2 Int_{k in k_n} CITA() -DELTA()
{ beta^-1 sum_{omega} [(i*omega-disp(k))(i*omega-disp(k - q))]^-1 }
After the sum, the braced factor equals - CITA(-disp(k)disp(k-q)) / (abs(disp(k)) + abs(disp(k-q)))
so the integral becomes
= +LAMBDA (2pi)^-2 Int_{k in k_n} DELTA(abs(disp(k))-LAMBDA) CITA(LAMBDA<abs(disp(k-q)))
CITA(-disp(k)disp(k-q)) / (abs(disp(k)) + abs(disp(k-q)))
Because the energy cutoff carries a DELTA(abs(disp(k))-LAMBDA), disp(k) equals +LAMBDA or -LAMBDA,
while CITA(-disp(k)disp(k-q)) forces disp(k) and disp(k-q) to have opposite signs,
so the expression splits into two terms
(first term: disp(k)=LAMBDA>0, hence disp(k-q)<0 and abs(disp(k-q))=-disp(k-q)>LAMBDA)
(second term analogous; every abs(disp(k)) in the denominator becomes LAMBDA, and the sign of disp(k-q) is known)
= +LAMBDA (2pi)^-2 Int_{k in kn} {
DELTA(disp(k)-LAMBDA)CITA(-disp(k-q)-LAMBDA) / (LAMBDA - disp(k - q))
DELTA(disp(k)+LAMBDA)CITA(disp(k-q)-LAMBDA) / (LAMBDA + disp(k - q)) }
The DELTA can also be integrated out, which turns the 2D integral into line integrals
along the curves disp(k) = LAMBDA and -LAMBDA
= +LAMBDA (2pi)^-2 *
[Int_{disp(k) = +LAMBDA} CITA(-disp(k-q)-LAMBDA) / (LAMBDA - disp(k - q))]
+[Int_{disp(k) = -LAMBDA} CITA(disp(k-q)-LAMBDA) / (LAMBDA + disp(k - q))]
'''
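# Reference note (added aside, not in the original text): the braced factor above
# follows from the standard zero-temperature Matsubara sum
#   beta^-1 sum_{omega} [(i*omega - E1)(i*omega - E2)]^-1 = (nF(E1) - nF(E2)) / (E1 - E2)
# with nF(E) -> CITA(-E) as T -> 0, giving -CITA(-E1*E2) / (|E1| + |E2|)
# for E1 = disp(k), E2 = disp(k-q).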
nega_q = Point(-qval.coord[0], -qval.coord[1], 1)
# integrate along the +LAMBDA contour
intposi = 0.
for edg in posi:
kval = middle_point(edg.ends[0], edg.ends[1])
kprim = ksft(kval, nega_q)
# CITA: theta step -- skip unless disp(k-q) <= -LAMBDA
disp_kprim = disp(kprim.coord[0], kprim.coord[1])
if -disp_kprim < lamb:
continue
# line integral: weight each segment by its length
intposi += edg.length / (lamb - disp_kprim)
# integrate along the -LAMBDA contour
intnega = 0.
for edg in nega:
kval = middle_point(edg.ends[0], edg.ends[1])
kprim = ksft(kval, nega_q)
# CITA: theta step -- skip unless disp(k-q) >= LAMBDA
disp_kprim = disp(kprim.coord[0], kprim.coord[1])
if disp_kprim < lamb:
continue
intnega += edg.length / (lamb + disp_kprim)
# multiply by the prefactor; `area` plays the (2*pi)^2 role of numpy.square(numpy.pi*2)
result = lamb * (intposi + intnega) / area
return result
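# Usage sketch (illustrative only, not from the original module): the edge lists
# would come from a Fermi-surface discretization at +/-LAMBDA, which this sketch
# does not construct; the dispersion and momentum-addition helper below are
# hypothetical stand-ins that only respect the interfaces used above
# (Point(x, y, 1), point.coord, edge.ends, edge.length).
#
#   square_disp = lambda kx, ky: -2.0 * (numpy.cos(kx) + numpy.cos(ky))
#   naive_add = lambda k, q: Point(k.coord[0] + q.coord[0],
#                                  k.coord[1] + q.coord[1], 1)  # no BZ folding here
#   qpt = Point(numpy.pi, numpy.pi, 1)
#   val = pi_plus_ec(posi_edges, nega_edges, 0.5, qpt,
#                    square_disp, naive_add, numpy.square(numpy.pi * 2))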
def pi_minus_ec(posi, nega, lamb, qval, disp, ksft, area):
'''Bubble with an energy cutoff as the flow parameter\n
posi are the edges at energy +LAMBDA, nega are the edges at energy -LAMBDA, lamb is LAMBDA\n
disp is the dispersion relation; qval is the momentum shift, which should be wrapped in a Point,\n
ksft is the momentum-addition function; it must handle the mapping back into the first Brillouin zone\n
area is the area of the first Brillouin zone\n
```(10.112) itself already enforces momentum conservation -- k and -k+q must satisfy it --```
```and mapping -k+q back into the first Brillouin zone takes care of Umklapp processes```
'''
'''
PI^-(n, q) of 10.112 = -LAMBDA (2pi)^-2 beta^-1 Int_{k in k_n} G'(k)G(- k + Q)
= -LAMBDA (2pi)^-2 Int_{k in k_n} CITA() -DELTA()
{ beta^-1 sum_{omega} [(i*omega-disp(k))(-i*omega-disp(-k + q))]^-1 }
At zero temperature this frequency sum equals (note that -k flips the frequency as well)
+CITA(+disp(k)disp(-k+q)) / (abs(disp(k)) + abs(disp(-k+q)))
so the original expression equals
= LAMBDA (2pi)^-2 Int_{k in k_n} {
DELTA(abs(disp(k))-LAMBDA) CITA(abs(disp(-k+q))-LAMBDA)
CITA(disp(k)disp(-k+q)) / (abs(disp(k)) + abs(disp(-k+q))) }
The second CITA forces disp(k) and disp(-k+q) to carry the same sign; integrating out the
DELTA and splitting by that sign gives
= LAMBDA (2pi)^-2 {
Int_{disp(k) = +LAMBDA} CITA(disp(-k+q) - LAMBDA) / (LAMBDA + disp(-k+q)) +
Int_{disp(k) = -LAMBDA} CITA(-disp(-k+q) -LAMBDA) / (LAMBDA - disp(-k+q))
}
'''
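# Reference note (added aside): for the particle-particle frequency pairing above,
#   beta^-1 sum_{omega} [(i*omega - E1)(-i*omega - E2)]^-1 -> +CITA(E1*E2) / (|E1| + |E2|)
# at T = 0, with E1 = disp(k), E2 = disp(-k+q).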
# integrate along the +LAMBDA contour
intposi = 0.
for edg in posi:
kval = middle_point(edg.ends[0], edg.ends[1])
nega_k = Point(-kval.coord[0], -kval.coord[1], 1)
kprim = ksft(nega_k, qval)
# CITA: theta step -- skip unless disp(-k+q) >= LAMBDA
disp_kprim = disp(kprim.coord[0], kprim.coord[1])
if disp_kprim < lamb:
continue
# weight each segment by its length
intposi += edg.length / (lamb + disp_kprim)
# integrate along the -LAMBDA contour
intnega = 0.
for edg in nega:
kval = middle_point(edg.ends[0], edg.ends[1])
nega_k = Point(-kval.coord[0], -kval.coord[1], 1)
kprim = ksft(nega_k, qval)
# CITA: theta step -- skip unless disp(-k+q) <= -LAMBDA
disp_kprim = disp(kprim.coord[0], kprim.coord[1])
if -disp_kprim < lamb:
continue
intnega += edg.length / (lamb - disp_kprim)
# multiply by the prefactor; `area` plays the (2*pi)^2 role of numpy.square(numpy.pi*2)
result = lamb * (intposi + intnega) / area
return result
| 38.622951 | 92 | 0.597623 | 687 | 4,712 | 4.030568 | 0.173217 | 0.099314 | 0.052004 | 0.043337 | 0.785121 | 0.757674 | 0.746118 | 0.746118 | 0.746118 | 0.746118 | 0 | 0.024237 | 0.220713 | 4,712 | 121 | 93 | 38.942149 | 0.729847 | 0.153862 | 0 | 0.772727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.068182 | 0 | 0.159091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
247ceb031715204b61e04d5096ccb4a1e8a0c1c5 | 19 | py | Python | simulator/uerrno.py | ondiiik/meteoink | 9bc7af929de12ed5eb2fafd64fcfe447f07b6eeb | [
"MIT"
] | 2 | 2021-05-27T13:32:16.000Z | 2022-03-30T01:23:34.000Z | simulator/uerrno.py | ondiiik/meteoink | 9bc7af929de12ed5eb2fafd64fcfe447f07b6eeb | [
"MIT"
] | null | null | null | simulator/uerrno.py | ondiiik/meteoink | 9bc7af929de12ed5eb2fafd64fcfe447f07b6eeb | [
"MIT"
] | null | null | null | from errno import * | 19 | 19 | 0.789474 | 3 | 19 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.157895 | 19 | 1 | 19 | 19 | 0.9375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
24cd69052af739b321af464aa0fd33831ccac381 | 34 | py | Python | server/opendp_apps/profiler/col_info_constants/__init__.py | mikephelan/opendp-ux | 80c65da0ed17adc01c69b05dbc9cbf3a5973a016 | [
"MIT"
] | 6 | 2021-05-25T18:50:58.000Z | 2022-03-23T19:52:15.000Z | server/opendp_apps/profiler/col_info_constants/__init__.py | mikephelan/opendp-ux | 80c65da0ed17adc01c69b05dbc9cbf3a5973a016 | [
"MIT"
] | 298 | 2021-05-19T17:34:09.000Z | 2022-03-29T18:45:22.000Z | server/opendp_apps/profiler/col_info_constants/__init__.py | opendp/dpcreator | 6ba3c58ecdcd81ca1f4533a14ce7604eccf6a646 | [
"MIT"
] | null | null | null | from .col_info_constants import *
| 17 | 33 | 0.823529 | 5 | 34 | 5.2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 34 | 1 | 34 | 34 | 0.866667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
24def7b545beb8780a1586b91e5d3b3ccec3b2b5 | 284 | py | Python | snmpagent_unity/unity_impl/FrontendPortType.py | factioninc/snmp-unity-agent | 3525dc0fac60d1c784dcdd7c41693544bcbef843 | [
"Apache-2.0"
] | 2 | 2019-03-01T11:14:59.000Z | 2019-10-02T17:47:59.000Z | snmpagent_unity/unity_impl/FrontendPortType.py | factioninc/snmp-unity-agent | 3525dc0fac60d1c784dcdd7c41693544bcbef843 | [
"Apache-2.0"
] | 2 | 2019-03-01T11:26:29.000Z | 2019-10-11T18:56:54.000Z | snmpagent_unity/unity_impl/FrontendPortType.py | factioninc/snmp-unity-agent | 3525dc0fac60d1c784dcdd7c41693544bcbef843 | [
"Apache-2.0"
] | 1 | 2019-10-03T21:09:17.000Z | 2019-10-03T21:09:17.000Z | class FrontendPortType(object):
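# Orientation note (added comment, not in the original file): these handler
# classes appear to adapt SNMP reads onto a Unity REST client -- read_get
# resolves one frontend port's type, while get_idx enumerates the frontend
# port indices for the table column.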
def read_get(self, name, idx_name, unity_client):
return unity_client.get_frontend_port_type(idx_name)
class FrontendPortTypeColumn(object):
def get_idx(self, name, idx, unity_client):
return unity_client.get_frontend_ports()
| 31.555556 | 60 | 0.760563 | 38 | 284 | 5.342105 | 0.447368 | 0.216749 | 0.108374 | 0.216749 | 0.384236 | 0.384236 | 0.384236 | 0 | 0 | 0 | 0 | 0 | 0.15493 | 284 | 8 | 61 | 35.5 | 0.845833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0 | 0.333333 | 1 | 0 | 0 | 0 | 0 | null | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
70113674ae77e20c347a07d91b3c957a4def55c7 | 80 | py | Python | cyk/util/__init__.py | azoimide/cyk | 0dd06fc70136246ae59b783c566889802e50b06c | [
"MIT"
] | null | null | null | cyk/util/__init__.py | azoimide/cyk | 0dd06fc70136246ae59b783c566889802e50b06c | [
"MIT"
] | null | null | null | cyk/util/__init__.py | azoimide/cyk | 0dd06fc70136246ae59b783c566889802e50b06c | [
"MIT"
] | null | null | null | from util import print_arr, map_string, map_to_string, levenshtein, rand_string
| 40 | 79 | 0.85 | 13 | 80 | 4.846154 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.1 | 80 | 1 | 80 | 80 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | 6 |
70198d40dd295ec404b44c31d9def82f7059c880 | 37 | py | Python | DP_FL_recreate/opacus/test.py | RosaYen/DP_FL_recreation | 30607645d9633483a4afa50c0e00bea65c0fb355 | [
"Apache-2.0"
] | null | null | null | DP_FL_recreate/opacus/test.py | RosaYen/DP_FL_recreation | 30607645d9633483a4afa50c0e00bea65c0fb355 | [
"Apache-2.0"
] | null | null | null | DP_FL_recreate/opacus/test.py | RosaYen/DP_FL_recreation | 30607645d9633483a4afa50c0e00bea65c0fb355 | [
"Apache-2.0"
] | 1 | 2020-12-09T05:56:32.000Z | 2020-12-09T05:56:32.000Z | def ttt():
print("My own opacus") | 18.5 | 26 | 0.594595 | 6 | 37 | 3.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.216216 | 37 | 2 | 26 | 18.5 | 0.758621 | 0 | 0 | 0 | 0 | 0 | 0.342105 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0 | 0 | 0 | 0.5 | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
7056138687c96a3280247043e3962a083c1f8b17 | 40 | py | Python | app/forms/__init__.py | Manuel7AP/dobc_web_app | ebb775e18a4f03f70d1bdb14a7ec8142bce9e857 | [
"Apache-2.0"
] | 11 | 2015-08-28T17:48:20.000Z | 2021-11-16T12:20:16.000Z | app/forms/__init__.py | Manuel7AP/dobc_web_app | ebb775e18a4f03f70d1bdb14a7ec8142bce9e857 | [
"Apache-2.0"
] | 9 | 2015-02-23T01:48:42.000Z | 2021-12-07T09:59:57.000Z | app/forms/__init__.py | Manuel7AP/dobc_web_app | ebb775e18a4f03f70d1bdb14a7ec8142bce9e857 | [
"Apache-2.0"
] | 12 | 2015-01-06T17:21:21.000Z | 2021-08-05T19:15:27.000Z | from add_guest_form import AddGuestForm
| 20 | 39 | 0.9 | 6 | 40 | 5.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.1 | 40 | 1 | 40 | 40 | 0.944444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7075a00d2799766d901056270ec8ae52f2a2fdb4 | 142 | py | Python | btu/btu_core/doctype/btu_configuration/test_btu_configuration.py | Govind-Jangid/btu | 856cce48fbf2fd349c064af67b2fc2d85918c61c | [
"MIT"
] | 7 | 2021-08-30T16:55:01.000Z | 2022-02-16T02:30:30.000Z | btu/btu_core/doctype/btu_configuration/test_btu_configuration.py | Govind-Jangid/btu | 856cce48fbf2fd349c064af67b2fc2d85918c61c | [
"MIT"
] | null | null | null | btu/btu_core/doctype/btu_configuration/test_btu_configuration.py | Govind-Jangid/btu | 856cce48fbf2fd349c064af67b2fc2d85918c61c | [
"MIT"
] | 6 | 2021-11-04T13:25:48.000Z | 2022-02-22T11:11:46.000Z | # Copyright (c) 2021, Datahenge LLC and Contributors
# See license.txt
import unittest
class TestBTUConfiguration(unittest.TestCase):
pass
| 17.75 | 52 | 0.795775 | 17 | 142 | 6.647059 | 0.941176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03252 | 0.133803 | 142 | 7 | 53 | 20.285714 | 0.886179 | 0.464789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
70997eb4473dccdaef20e2dff4ad7306973cfcd5 | 52,119 | py | Python | tests/test_json_asserter.py | ShipChain/python-common | bdce2bf95f0418c782c1bdb6fed2c9cd8776918e | [
"Apache-2.0"
] | 2 | 2019-12-15T13:46:35.000Z | 2021-07-25T14:12:16.000Z | tests/test_json_asserter.py | ShipChain/python-common | bdce2bf95f0418c782c1bdb6fed2c9cd8776918e | [
"Apache-2.0"
] | 9 | 2019-10-24T19:49:21.000Z | 2020-10-26T19:38:52.000Z | tests/test_json_asserter.py | ShipChain/python-common | bdce2bf95f0418c782c1bdb6fed2c9cd8776918e | [
"Apache-2.0"
] | null | null | null | import pytest
import uuid
from rest_framework import status
from shipchain_common.test_utils import AssertionHelper
from unittest.mock import Mock
EXAMPLE_PLAIN = {
'id': '07b374c3-ed9b-4811-901a-d0c5d746f16a',
'name': 'example 1',
'field_1': 1,
'owner': {
'username': 'user1'
}
}
EXAMPLE_PLAIN_2 = {
'id': 'bf0d0b89-482f-40dd-b29b-9e5e05b83ed6',
'name': 'example 2',
'field_1': 2,
'owner': {
'username': 'user2'
}
}
EXAMPLE_PLAIN_3 = {
'id': '2aa1db84-6618-4e35-9b2a-f450c20699fe',
'name': 'example 3',
'field_1': 3,
'owner': {
'username': 'user3'
}
}
EXAMPLE_USER = {
'type': 'User',
'id': '07b374c3-ed9b-4811-901a-d0c5d746f16a',
'attributes': {
'username': 'user1'
}
}
EXAMPLE_RESOURCE = {
'type': 'ExampleResource',
'id': 'a6f554e9-3bd3-4972-90e1-b8a19aab7091',
'attributes': {
'name': 'example 1',
'field_1': 1
}
}
EXAMPLE_RESOURCE_2 = {
'type': 'ExampleResource',
'id': 'b717eff3-b021-4f3f-a2be-7cdc08a1bfb5',
'attributes': {
'name': 'example 2',
'field_1': 2
}
}
EXAMPLE_RESOURCE_3 = {
'type': 'ExampleResource',
'id': 'd72d5d56-c359-455e-876b-52835228c852',
'attributes': {
'name': 'example 3',
'field_1': 3
}
}
EXAMPLE_RESOURCE_4 = {
'type': 'ExampleResource',
'id': 'e8ba3cd9-9b5e-41fa-9b08-116284e968fd',
'attributes': {
'name': 'example 4',
'field_1': 4
}
}
@pytest.fixture
def vnd_single():
return {
'data': {
'type': EXAMPLE_RESOURCE['type'],
'id': EXAMPLE_RESOURCE['id'],
'attributes': EXAMPLE_RESOURCE['attributes'],
'relationships': {
'owner': {
'data': {
'type': EXAMPLE_USER['type'],
'id': EXAMPLE_USER['id']
}
},
'children': {
'meta': {
'count': 2
},
'data': [
{
'type': EXAMPLE_RESOURCE_2['type'],
'id': EXAMPLE_RESOURCE_2['id']
},
{
'type': EXAMPLE_RESOURCE_4['type'],
'id': EXAMPLE_RESOURCE_4['id']
}
]
}
},
'meta': {
'key': 'value',
'other_key': 'other_value',
}
},
'included': [
EXAMPLE_USER,
EXAMPLE_RESOURCE_2,
EXAMPLE_RESOURCE_4
]
}
@pytest.fixture
def vnd_list():
return {
'data': [
{
'type': EXAMPLE_RESOURCE['type'],
'id': EXAMPLE_RESOURCE['id'],
'attributes': EXAMPLE_RESOURCE['attributes'],
'relationships': {
'owner': {
'data': {
'type': EXAMPLE_USER['type'],
'id': EXAMPLE_USER['id']
}
},
'children': {
'meta': {
'count': 1
},
'data': [
{
'type': EXAMPLE_RESOURCE_2['type'],
'id': EXAMPLE_RESOURCE_2['id']
}
]
}
},
'meta': {
'key': 'value',
'other_key': 'other_value',
}
},
{
'type': EXAMPLE_RESOURCE_3['type'],
'id': EXAMPLE_RESOURCE_3['id'],
'attributes': EXAMPLE_RESOURCE_3['attributes'],
'relationships': {
'owner': {
'data': {
'type': EXAMPLE_USER['type'],
'id': EXAMPLE_USER['id']
}
},
'children': {
'meta': {
'count': 1
},
'data': [
{
'type': EXAMPLE_RESOURCE_2['type'],
'id': EXAMPLE_RESOURCE_2['id']
}
]
}
}
},
],
'included': [
EXAMPLE_USER,
EXAMPLE_RESOURCE_2
]
}
@pytest.fixture
def vnd_error():
return {
'errors': [
{
'detail': ''
}
]
}
@pytest.fixture
def vnd_error_400(vnd_error):
vnd_error['errors'][0]['detail'] = 'generic 400 error'
vnd_error['errors'][0]['source'] = {
'pointer': ''
}
return vnd_error
@pytest.fixture
def json_error():
return {
'detail': 'Error detail'
}
@pytest.fixture
def entity_ref_1():
return AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
pk=EXAMPLE_RESOURCE['id'],
attributes=EXAMPLE_RESOURCE['attributes'],
relationships={'owner': AssertionHelper.EntityRef(
resource=EXAMPLE_USER['type'],
pk=EXAMPLE_USER['id'],
)})
@pytest.fixture
def entity_ref_3():
return AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE_3['type'],
pk=EXAMPLE_RESOURCE_3['id'],
attributes=EXAMPLE_RESOURCE_3['attributes'],
relationships={'owner': AssertionHelper.EntityRef(
resource=EXAMPLE_USER['type'],
pk=EXAMPLE_USER['id'],
)})
class TestAssertionHelper:
@pytest.fixture(autouse=True)
def make_build_response(self):
def _build_response(data, status_code=status.HTTP_200_OK):
return Mock(status_code=status_code, json=lambda: data)
self.build_response = _build_response
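# The Mock above is a minimal stand-in for a DRF Response: the assertions in this
# suite appear to need only `status_code` and a callable `json()`, so no real
# view or API client is exercised.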
@pytest.fixture
def vnd_error_401(self, vnd_error):
vnd_error['errors'][0]['detail'] = 'Authentication credentials were not provided'
return vnd_error
@pytest.fixture
def vnd_error_403(self, vnd_error):
vnd_error['errors'][0]['detail'] = 'You do not have permission to perform this action'
return vnd_error
@pytest.fixture
def vnd_error_404(self, vnd_error):
vnd_error['errors'][0]['detail'] = 'Not found'
return vnd_error
@pytest.fixture
def vnd_error_405(self, vnd_error):
vnd_error['errors'][0]['detail'] = 'Method not allowed'
return vnd_error
def test_status_200(self, vnd_single, vnd_error_400):
response = self.build_response(vnd_single)
AssertionHelper.HTTP_200(response)
with pytest.raises(AssertionError) as err:
response = self.build_response(vnd_error_400, status_code=status.HTTP_400_BAD_REQUEST)
AssertionHelper.HTTP_200(response)
assert 'status_code 400 != 200' in str(err.value)
def test_status_201(self, vnd_single, vnd_error_400):
response = self.build_response(vnd_single, status_code=status.HTTP_201_CREATED)
AssertionHelper.HTTP_201(response)
with pytest.raises(AssertionError) as err:
response = self.build_response(vnd_error_400, status_code=status.HTTP_400_BAD_REQUEST)
AssertionHelper.HTTP_201(response)
assert 'status_code 400 != 201' in str(err.value)
def test_status_202(self, vnd_single, vnd_error_400):
response = self.build_response(vnd_single, status_code=status.HTTP_202_ACCEPTED)
AssertionHelper.HTTP_202(response)
with pytest.raises(AssertionError) as err:
response = self.build_response(vnd_error_400, status_code=status.HTTP_400_BAD_REQUEST)
AssertionHelper.HTTP_202(response)
assert 'status_code 400 != 202' in str(err.value)
def test_status_204(self, vnd_single, vnd_error_400):
response = self.build_response(vnd_single, status_code=status.HTTP_204_NO_CONTENT)
AssertionHelper.HTTP_204(response)
with pytest.raises(AssertionError) as err:
response = self.build_response(vnd_error_400, status_code=status.HTTP_400_BAD_REQUEST)
AssertionHelper.HTTP_204(response)
assert 'status_code 400 != 204' in str(err.value)
def test_status_400(self, vnd_single, vnd_error_400):
response = self.build_response(vnd_error_400, status_code=status.HTTP_400_BAD_REQUEST)
AssertionHelper.HTTP_400(response)
with pytest.raises(AssertionError) as err:
response = self.build_response(vnd_single)
AssertionHelper.HTTP_400(response)
assert 'status_code 200 != 400' in str(err.value)
def test_status_400_custom_message(self, vnd_error_400):
vnd_error_400['errors'][0]['detail'] = 'custom error message'
response = self.build_response(vnd_error_400, status_code=status.HTTP_400_BAD_REQUEST)
AssertionHelper.HTTP_400(response, error='custom error message')
def test_status_400_custom_pointer(self, vnd_error_400):
vnd_error_400['errors'][0]['detail'] = 'custom error message'
vnd_error_400['errors'][0]['source']['pointer'] = 'pointer'
response = self.build_response(vnd_error_400, status_code=status.HTTP_400_BAD_REQUEST)
AssertionHelper.HTTP_400(response, error='custom error message', pointer='pointer')
def test_status_400_json(self, vnd_single, json_error):
response = self.build_response(json_error, status_code=status.HTTP_400_BAD_REQUEST)
AssertionHelper.HTTP_400(response, error=json_error['detail'], vnd=False)
with pytest.raises(AssertionError) as err:
response = self.build_response(json_error, status_code=status.HTTP_200_OK)
AssertionHelper.HTTP_400(response, error='Different error', vnd=False)
assert 'status_code 200 != 400' in str(err.value)
with pytest.raises(AssertionError) as err:
response = self.build_response(json_error, status_code=status.HTTP_400_BAD_REQUEST)
AssertionHelper.HTTP_400(response, error='Different error', vnd=False)
assert f'Error Different error not found in {json_error["detail"]}' in str(err.value)
def test_status_401(self, vnd_single, vnd_error_401):
response = self.build_response(vnd_error_401, status_code=status.HTTP_401_UNAUTHORIZED)
AssertionHelper.HTTP_401(response)
with pytest.raises(AssertionError) as err:
response = self.build_response(vnd_single)
AssertionHelper.HTTP_401(response)
assert 'status_code 200 != 401' in str(err.value)
def test_status_401_json(self, vnd_single, json_error):
response = self.build_response(json_error, status_code=status.HTTP_401_UNAUTHORIZED)
AssertionHelper.HTTP_401(response, error=json_error['detail'], vnd=False)
with pytest.raises(AssertionError) as err:
response = self.build_response(json_error, status_code=status.HTTP_200_OK)
AssertionHelper.HTTP_401(response, error='Different error', vnd=False)
assert 'status_code 200 != 401' in str(err.value)
with pytest.raises(AssertionError) as err:
response = self.build_response(json_error, status_code=status.HTTP_401_UNAUTHORIZED)
AssertionHelper.HTTP_401(response, error='Different error', vnd=False)
assert f'Error Different error not found in {json_error["detail"]}' in str(err.value)
def test_status_403(self, vnd_single, vnd_error_403):
response = self.build_response(vnd_error_403, status_code=status.HTTP_403_FORBIDDEN)
AssertionHelper.HTTP_403(response)
with pytest.raises(AssertionError) as err:
response = self.build_response(vnd_single)
AssertionHelper.HTTP_403(response)
assert 'status_code 200 != 403' in str(err.value)
def test_status_403_json(self, vnd_single, json_error):
response = self.build_response(json_error, status_code=status.HTTP_403_FORBIDDEN)
AssertionHelper.HTTP_403(response, error=json_error['detail'], vnd=False)
with pytest.raises(AssertionError) as err:
response = self.build_response(json_error, status_code=status.HTTP_200_OK)
AssertionHelper.HTTP_403(response, error='Different error', vnd=False)
assert 'status_code 200 != 403' in str(err.value)
with pytest.raises(AssertionError) as err:
response = self.build_response(json_error, status_code=status.HTTP_403_FORBIDDEN)
AssertionHelper.HTTP_403(response, error='Different error', vnd=False)
assert f'Error Different error not found in {json_error["detail"]}' in str(err.value)
def test_status_404(self, vnd_single, vnd_error_404):
response = self.build_response(vnd_error_404, status_code=status.HTTP_404_NOT_FOUND)
AssertionHelper.HTTP_404(response)
with pytest.raises(AssertionError) as err:
response = self.build_response(vnd_single)
AssertionHelper.HTTP_404(response)
assert 'status_code 200 != 404' in str(err.value)
def test_status_404_json(self, vnd_single, json_error):
response = self.build_response(json_error, status_code=status.HTTP_404_NOT_FOUND)
AssertionHelper.HTTP_404(response, error=json_error['detail'], vnd=False)
with pytest.raises(AssertionError) as err:
response = self.build_response(json_error, status_code=status.HTTP_200_OK)
AssertionHelper.HTTP_404(response, error='Different error', vnd=False)
assert 'status_code 200 != 404' in str(err.value)
with pytest.raises(AssertionError) as err:
response = self.build_response(json_error, status_code=status.HTTP_404_NOT_FOUND)
AssertionHelper.HTTP_404(response, error='Different error', vnd=False)
assert f'Error Different error not found in {json_error["detail"]}' in str(err.value)
def test_status_405(self, vnd_single, vnd_error_405):
response = self.build_response(vnd_error_405, status_code=status.HTTP_405_METHOD_NOT_ALLOWED)
AssertionHelper.HTTP_405(response, error='Method not allowed')
with pytest.raises(AssertionError) as err:
response = self.build_response(vnd_single)
AssertionHelper.HTTP_405(response, error='Method not allowed')
assert 'status_code 200 != 405' in str(err.value)
def test_status_405_json(self, vnd_single, json_error):
response = self.build_response(json_error, status_code=status.HTTP_405_METHOD_NOT_ALLOWED)
AssertionHelper.HTTP_405(response, error=json_error['detail'], vnd=False)
with pytest.raises(AssertionError) as err:
response = self.build_response(json_error, status_code=status.HTTP_200_OK)
AssertionHelper.HTTP_405(response, error='Different error', vnd=False)
assert 'status_code 200 != 405' in str(err.value)
with pytest.raises(AssertionError) as err:
response = self.build_response(json_error, status_code=status.HTTP_405_METHOD_NOT_ALLOWED)
AssertionHelper.HTTP_405(response, error='Different error', vnd=False)
assert f'Error Different error not found in {json_error["detail"]}' in str(err.value)
def test_status_500(self, vnd_single, vnd_error):
vnd_error['errors'][0]['detail'] = 'A server error occurred.'
response = self.build_response(vnd_error, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
AssertionHelper.HTTP_500(response)
with pytest.raises(AssertionError) as err:
response = self.build_response(vnd_single)
AssertionHelper.HTTP_500(response)
assert 'status_code 200 != 500' in str(err.value)
def test_status_500_custom_message(self, vnd_error):
vnd_error['errors'][0]['detail'] = 'custom error message'
response = self.build_response(vnd_error, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
AssertionHelper.HTTP_500(response, error='custom error message')
def test_status_503(self, vnd_single, vnd_error):
vnd_error['errors'][0]['detail'] = 'Service temporarily unavailable, try again later'
response = self.build_response(vnd_error, status_code=status.HTTP_503_SERVICE_UNAVAILABLE)
AssertionHelper.HTTP_503(response)
with pytest.raises(AssertionError) as err:
response = self.build_response(vnd_single)
AssertionHelper.HTTP_503(response)
assert 'status_code 200 != 503' in str(err.value)
def test_status_503_custom_message(self, vnd_error):
vnd_error['errors'][0]['detail'] = 'custom error message'
response = self.build_response(vnd_error, status_code=status.HTTP_503_SERVICE_UNAVAILABLE)
AssertionHelper.HTTP_503(response, error='custom error message')
def test_status_wrong_message(self, vnd_error_404):
response = self.build_response(vnd_error_404, status_code=status.HTTP_404_NOT_FOUND)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_404(response, error='Not the correct error')
assert f'Error `Not the correct error` not found in' in str(err.value)
def test_status_400_wrong_pointer(self, vnd_error_400):
vnd_error_400['errors'][0]['detail'] = 'custom error message'
vnd_error_400['errors'][0]['source']['pointer'] = 'pointer'
response = self.build_response(vnd_error_400, status_code=status.HTTP_400_BAD_REQUEST)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_400(response, error='custom error message', pointer='Not the correct pointer')
assert f'Error `Not the correct pointer` not found in' in str(err.value)
def test_status_404_wrong_pointer(self, vnd_error):
vnd_error['errors'][0]['detail'] = 'Not found'
vnd_error['errors'][0]['source'] = {
'pointer': 'correct pointer'
}
response = self.build_response(vnd_error, status_code=status.HTTP_404_NOT_FOUND)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_404(response, pointer='Not the correct pointer')
assert f'Error `Not the correct pointer` not found in' in str(err.value)
def test_status_pointer_requires_correct_error(self, vnd_error):
vnd_error['errors'][0]['detail'] = 'Not found'
vnd_error['errors'][0]['source'] = {
'pointer': 'correct pointer'
}
response = self.build_response(vnd_error, status_code=status.HTTP_404_NOT_FOUND)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_404(response, error='Not the correct error', pointer='Not the correct pointer')
assert f'Error `Not the correct error` not found in' in str(err.value)
def test_status_in_second_error(self, vnd_error_404):
vnd_error_404['errors'].append({'detail': 'another error'})
response = self.build_response(vnd_error_404, status_code=status.HTTP_404_NOT_FOUND)
AssertionHelper.HTTP_404(response, error='another error')
def test_status_missing_in_multiple_errors(self, vnd_error_404):
vnd_error_404['errors'].append({'detail': 'another error'})
response = self.build_response(vnd_error_404, status_code=status.HTTP_404_NOT_FOUND)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_404(response, error='Not the correct error')
assert f'Error `Not the correct error` not found in' in str(err.value)
def test_exclusive_entity_refs_or_fields(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(), attributes={'test': 1})
assert 'Use Only `entity_refs` or explicit `attributes`, `relationships`, `resource`, and `pk` but not both' \
in str(err.value)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(), relationships={'test': 1})
assert 'Use Only `entity_refs` or explicit `attributes`, `relationships`, `resource`, and `pk` but not both' \
in str(err.value)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(), resource='test')
assert 'Use Only `entity_refs` or explicit `attributes`, `relationships`, `resource`, and `pk` but not both' \
in str(err.value)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(), pk='test')
assert 'Use Only `entity_refs` or explicit `attributes`, `relationships`, `resource`, and `pk` but not both' \
in str(err.value)
def test_vnd_with_non_jsonapi_data(self):
response = self.build_response(EXAMPLE_PLAIN)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, attributes=EXAMPLE_PLAIN)
assert f'response does not contain `data` property' in str(err.value)
def test_vnd_is_list(self, vnd_single, vnd_list):
single_response = self.build_response(vnd_single)
list_response = self.build_response(vnd_list)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(single_response, is_list=True)
assert 'Response should be a list' in str(err.value)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(list_response)
assert 'Response should not be a list' in str(err.value)
def test_vnd_attributes_match(self, vnd_single):
response = self.build_response(vnd_single)
AssertionHelper.HTTP_200(response, attributes=EXAMPLE_RESOURCE['attributes'])
def test_vnd_attributes_not_match(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, attributes=EXAMPLE_RESOURCE_2['attributes'])
assert f'Attribute Value incorrect `{EXAMPLE_RESOURCE_2["attributes"]["name"]}` in ' in str(err.value)
def test_vnd_relationships_should_be_entity_ref(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, relationships={'owner': EXAMPLE_RESOURCE_2})
assert f'asserted relationship is not an EntityRef' in str(err.value)
def test_vnd_relationships_match(self, vnd_single):
response = self.build_response(vnd_single)
AssertionHelper.HTTP_200(response, relationships={'owner': AssertionHelper.EntityRef(
resource=EXAMPLE_USER['type'],
pk=EXAMPLE_USER['id'],
)})
def test_vnd_relationships_match_list(self, vnd_single):
response = self.build_response(vnd_single)
AssertionHelper.HTTP_200(response, relationships={
'owner': AssertionHelper.EntityRef(
resource=EXAMPLE_USER['type'],
pk=EXAMPLE_USER['id'],
),
'children': [
AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE_2['type'],
pk=EXAMPLE_RESOURCE_2['id'],
),
AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE_4['type'],
pk=EXAMPLE_RESOURCE_4['id'],
),
]})
def test_vnd_relationships_not_match(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, relationships={'owner': AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
pk=EXAMPLE_RESOURCE['id'],
)})
assert f'EntityRef resource type `{EXAMPLE_RESOURCE["type"]}` does not match' in str(err.value)
def test_vnd_relationships_not_match_in_list(self, vnd_single):
response = self.build_response(vnd_single)
relationship = AssertionHelper.EntityRef(resource=EXAMPLE_RESOURCE_3["type"],
pk=EXAMPLE_RESOURCE_3["id"],
attributes={})
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, relationships={'children': [
AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE_2['type'],
pk=EXAMPLE_RESOURCE_2['id'],
),
relationship,
]})
assert f'{relationship} NOT IN ' in str(err.value)
def test_vnd_included_should_be_entity_ref(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, included=EXAMPLE_RESOURCE_2)
assert f'asserted includes is not an EntityRef' in str(err.value)
def test_vnd_included_full_match(self, vnd_single):
response = self.build_response(vnd_single)
AssertionHelper.HTTP_200(response, included=AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE_2['type'],
pk=EXAMPLE_RESOURCE_2['id'],
attributes=EXAMPLE_RESOURCE_2['attributes'],
))
def test_vnd_included_full_not_match(self, vnd_single):
response = self.build_response(vnd_single)
include = AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
pk=EXAMPLE_RESOURCE['id'],
attributes=EXAMPLE_RESOURCE['attributes'],
)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, included=include)
assert f'{include} NOT IN' in str(err.value)
def test_vnd_included_type_pk_match(self, vnd_single):
response = self.build_response(vnd_single)
AssertionHelper.HTTP_200(response, included=AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE_2['type'],
pk=EXAMPLE_RESOURCE_2['id'],
))
def test_vnd_included_type_pk_not_match(self, vnd_single):
response = self.build_response(vnd_single)
include = AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
pk=EXAMPLE_RESOURCE['id'],
)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, included=include)
assert f'{include} NOT IN' in str(err.value)
def test_vnd_included_attributes_match(self, vnd_single):
response = self.build_response(vnd_single)
AssertionHelper.HTTP_200(response, included=AssertionHelper.EntityRef(
attributes=EXAMPLE_RESOURCE_2['attributes'],
))
def test_vnd_included_attributes_not_match(self, vnd_single):
response = self.build_response(vnd_single)
include = AssertionHelper.EntityRef(
attributes=EXAMPLE_RESOURCE['attributes'],
)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, included=include)
assert f'{include} NOT IN' in str(err.value)
def test_vnd_included_list_all_match(self, vnd_single):
response = self.build_response(vnd_single)
AssertionHelper.HTTP_200(response, included=[
AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE_2['type'],
pk=EXAMPLE_RESOURCE_2['id'],
attributes=EXAMPLE_RESOURCE_2['attributes']),
AssertionHelper.EntityRef(
resource=EXAMPLE_USER['type'],
pk=EXAMPLE_USER['id'],
attributes=EXAMPLE_USER['attributes']),
])
def test_vnd_included_list_one_match(self, vnd_single):
response = self.build_response(vnd_single)
include_1 = AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
pk=EXAMPLE_RESOURCE['id'],
attributes=EXAMPLE_RESOURCE['attributes'])
include_2 = AssertionHelper.EntityRef(
resource=EXAMPLE_USER['type'],
pk=EXAMPLE_USER['id'],
attributes=EXAMPLE_USER['attributes'])
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, included=[include_1, include_2])
assert f'{include_1} NOT IN' in str(err.value)
def test_vnd_included_list_none_match(self, vnd_single):
response = self.build_response(vnd_single)
include_1 = AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
pk=EXAMPLE_RESOURCE['id'],
attributes=EXAMPLE_RESOURCE['attributes'])
include_2 = AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE_3['type'],
pk=EXAMPLE_RESOURCE_3['id'],
attributes=EXAMPLE_RESOURCE_3['attributes'])
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, included=[include_1, include_2])
assert f'{include_1} NOT IN' in str(err.value)
def test_entity_list_non_list_response(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=[AssertionHelper.EntityRef()])
assert 'entity_refs should not be a list for a non-list response' in str(err.value)
def test_vnd_entity_uuid_pk(self, vnd_single):
response = self.build_response(vnd_single)
AssertionHelper.HTTP_200(
response,
entity_refs=AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
pk=uuid.UUID(EXAMPLE_RESOURCE['id']),
attributes=EXAMPLE_RESOURCE['attributes'],
relationships={'owner': AssertionHelper.EntityRef(
resource=EXAMPLE_USER['type'],
pk=uuid.UUID(EXAMPLE_USER['id']),
)}
),
included=AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE_2['type'],
pk=uuid.UUID(EXAMPLE_RESOURCE_2['id']),
))
def test_vnd_entity_full_match(self, vnd_single):
response = self.build_response(vnd_single)
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
pk=EXAMPLE_RESOURCE['id'],
attributes=EXAMPLE_RESOURCE['attributes'],
relationships={'owner': AssertionHelper.EntityRef(
resource=EXAMPLE_USER['type'],
pk=EXAMPLE_USER['id'],
)}
))
def test_vnd_entity_full_type_not_match(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(
resource=EXAMPLE_USER['type'],
pk=EXAMPLE_RESOURCE['id'],
attributes=EXAMPLE_RESOURCE['attributes'],
relationships={'owner': AssertionHelper.EntityRef(
resource=EXAMPLE_USER['type'],
pk=EXAMPLE_USER['id'],
)}
))
assert f'Invalid Resource Type in' in str(err.value)
def test_vnd_entity_full_id_not_match(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
pk=EXAMPLE_USER['id'],
attributes=EXAMPLE_RESOURCE['attributes'],
relationships={'owner': AssertionHelper.EntityRef(
resource=EXAMPLE_USER['type'],
pk=EXAMPLE_USER['id'],
)}
))
assert f'Invalid ID in' in str(err.value)
def test_vnd_entity_full_attributes_missing(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
pk=EXAMPLE_RESOURCE['id'],
attributes=EXAMPLE_USER['attributes'],
relationships={'owner': AssertionHelper.EntityRef(
resource=EXAMPLE_USER['type'],
pk=EXAMPLE_USER['id'],
)}
))
assert f'Missing Attribute `username` in' in str(err.value)
def test_vnd_entity_full_attributes_not_match(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
pk=EXAMPLE_RESOURCE['id'],
attributes=EXAMPLE_RESOURCE_2['attributes'],
relationships={'owner': AssertionHelper.EntityRef(
resource=EXAMPLE_USER['type'],
pk=EXAMPLE_USER['id'],
)}
))
assert f'Attribute Value incorrect `example 2` in' in str(err.value)
def test_vnd_entity_full_relationships_type_not_match(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
pk=EXAMPLE_RESOURCE['id'],
attributes=EXAMPLE_RESOURCE['attributes'],
relationships={'owner': AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
pk=EXAMPLE_USER['id'],
)}
))
assert f'EntityRef resource type `{EXAMPLE_RESOURCE["type"]}` does not match' in str(err.value)
def test_vnd_entity_full_relationships_pk_not_match(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
pk=EXAMPLE_RESOURCE['id'],
attributes=EXAMPLE_RESOURCE['attributes'],
relationships={'owner': AssertionHelper.EntityRef(
resource=EXAMPLE_USER['type'],
pk=EXAMPLE_RESOURCE['id'],
)}
))
assert f'EntityRef ID `{EXAMPLE_RESOURCE["id"]}` does not match' in str(err.value)
def test_vnd_entity_type_pk_match(self, vnd_single):
response = self.build_response(vnd_single)
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
pk=EXAMPLE_RESOURCE['id'],
))
def test_vnd_entity_type_pk_not_match(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(
resource=EXAMPLE_USER['type'],
pk=EXAMPLE_RESOURCE['id'],
))
assert f'Invalid Resource Type in' in str(err.value)
def test_vnd_entity_attribute_only_match(self, vnd_single):
response = self.build_response(vnd_single)
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(
attributes=EXAMPLE_RESOURCE['attributes']
))
def test_vnd_entity_attribute_only_not_match(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(
attributes=EXAMPLE_RESOURCE_2['attributes'],
))
assert f'Attribute Value incorrect `example 2` in' in str(err.value)
def test_vnd_list_entity_full_match(self, vnd_list):
response = self.build_response(vnd_list)
AssertionHelper.HTTP_200(response, is_list=True, entity_refs=AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
pk=EXAMPLE_RESOURCE['id'],
attributes=EXAMPLE_RESOURCE['attributes'],
relationships={'owner': AssertionHelper.EntityRef(
resource=EXAMPLE_USER['type'],
pk=EXAMPLE_USER['id'],
)}
))
def test_vnd_list_entity_list_all_match(self, vnd_list, entity_ref_1, entity_ref_3):
response = self.build_response(vnd_list)
AssertionHelper.HTTP_200(response, is_list=True, entity_refs=[entity_ref_1, entity_ref_3])
def test_vnd_list_count(self, vnd_list):
response = self.build_response(vnd_list)
AssertionHelper.HTTP_200(response, is_list=True, count=len(vnd_list['data']))
def test_vnd_list_wrong_count(self, vnd_list):
list_length = len(vnd_list['data'])
response = self.build_response(vnd_list)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, is_list=True, count=list_length - 1)
assert f'Difference in count of response_data, got {list_length} expected {list_length - 1}' in str(err.value)
def test_vnd_single_count(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, count=1)
assert f'Count is only checked when response is list' in str(err.value)
def test_vnd_list_ordering(self, vnd_list, entity_ref_1, entity_ref_3):
response = self.build_response(vnd_list)
AssertionHelper.HTTP_200(response, is_list=True, entity_refs=[entity_ref_1, entity_ref_3], check_ordering=True)
def test_vnd_list_wrong_ordering(self, vnd_list, entity_ref_1, entity_ref_3):
response = self.build_response(vnd_list)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, is_list=True, entity_refs=[entity_ref_3, entity_ref_1],
check_ordering=True)
assert 'Invalid ID in ' in str(err.value)
def test_vnd_list_wrong_ordering_amount(self, vnd_list, entity_ref_1, entity_ref_3):
response = self.build_response(vnd_list)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, is_list=True, entity_refs=[entity_ref_1, entity_ref_3, entity_ref_1],
check_ordering=True)
assert 'Error: more entity refs supplied than available in response data. ' in str(err.value)
def test_vnd_single_ordering(self, vnd_single, entity_ref_1):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=entity_ref_1, check_ordering=True)
assert f'Ordering is only checked when response is list' in str(err.value)
def test_vnd_list_entity_list_one_not_match(self, vnd_list, entity_ref_1, entity_ref_3):
response = self.build_response(vnd_list)
entity_ref_3.pk = EXAMPLE_RESOURCE_2['id']
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, is_list=True, entity_refs=[entity_ref_1, entity_ref_3])
assert f'{entity_ref_3} NOT IN' in str(err.value)
def test_plain_json_valid_parameters(self):
response = self.build_response(EXAMPLE_PLAIN)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, vnd=False, entity_refs=AssertionHelper.EntityRef())
assert f'entity_refs not valid when vnd=False' in str(err.value)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, vnd=False, relationships=AssertionHelper.EntityRef())
assert f'relationships not valid when vnd=False' in str(err.value)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, vnd=False, included=AssertionHelper.EntityRef())
assert f'included not valid when vnd=False' in str(err.value)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, vnd=False)
assert f'attributes must be provided when vnd=False' in str(err.value)
def test_plain_json_attributes(self):
response = self.build_response(EXAMPLE_PLAIN)
AssertionHelper.HTTP_200(response, vnd=False, attributes=EXAMPLE_PLAIN)
def test_plain_json_attributes_top_level_missing(self):
response = self.build_response(EXAMPLE_PLAIN)
invalid_attributes = EXAMPLE_PLAIN.copy()
invalid_attributes['new_field'] = 1
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, vnd=False, attributes=invalid_attributes)
assert f'Missing Attribute `new_field` in ' in str(err.value)
def test_plain_json_attributes_top_level_mismatch(self):
response = self.build_response(EXAMPLE_PLAIN)
invalid_attributes = EXAMPLE_PLAIN.copy()
invalid_attributes['id'] = 1
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, vnd=False, attributes=invalid_attributes)
assert f'Attribute Value incorrect `1` in ' in str(err.value)
def test_plain_json_attributes_nested_missing(self):
response = self.build_response(EXAMPLE_PLAIN)
invalid_attributes = EXAMPLE_PLAIN.copy()
invalid_attributes['owner'] = EXAMPLE_PLAIN['owner'].copy()
invalid_attributes['owner']['new_field'] = 'test'
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, vnd=False, attributes=invalid_attributes)
assert f'Missing Attribute `new_field` in ' in str(err.value)
def test_plain_json_attributes_nested_mismatch(self):
response = self.build_response(EXAMPLE_PLAIN)
invalid_attributes = EXAMPLE_PLAIN.copy()
invalid_attributes['owner'] = EXAMPLE_PLAIN['owner'].copy()
invalid_attributes['owner']['id'] = 'test'
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, vnd=False, attributes=invalid_attributes)
assert f'Missing Attribute `id` in ' in str(err.value)
def test_plain_json_attributes_list_assertions(self):
single_response = self.build_response(EXAMPLE_PLAIN)
list_response = self.build_response([EXAMPLE_PLAIN, EXAMPLE_PLAIN_2])
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(single_response, vnd=False, is_list=True, attributes=EXAMPLE_PLAIN)
assert f'Response should be a list' in str(err.value)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(list_response, vnd=False, attributes=EXAMPLE_PLAIN)
assert f'Response should not be a list' in str(err.value)
def test_plain_json_attributes_list_single_match(self):
response = self.build_response([EXAMPLE_PLAIN, EXAMPLE_PLAIN_2])
AssertionHelper.HTTP_200(response, vnd=False, is_list=True, attributes=EXAMPLE_PLAIN)
def test_plain_json_attributes_list_both_match(self):
response = self.build_response([EXAMPLE_PLAIN, EXAMPLE_PLAIN_2])
AssertionHelper.HTTP_200(response, vnd=False, is_list=True, attributes=[EXAMPLE_PLAIN, EXAMPLE_PLAIN_2])
def test_plain_json_attributes_list_one_missing(self):
response = self.build_response([EXAMPLE_PLAIN, EXAMPLE_PLAIN_2])
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, vnd=False, is_list=True, attributes=[EXAMPLE_PLAIN, EXAMPLE_PLAIN_3])
assert f'{EXAMPLE_PLAIN_3} NOT IN ' in str(err.value)
def test_plain_json_attributes_list_ordering(self):
response = self.build_response([EXAMPLE_PLAIN, EXAMPLE_PLAIN_2])
AssertionHelper.HTTP_200(response, vnd=False, is_list=True, attributes=[EXAMPLE_PLAIN, EXAMPLE_PLAIN_2],
check_ordering=True)
def test_plain_json_attributes_list_wrong_ordering(self):
response = self.build_response([EXAMPLE_PLAIN, EXAMPLE_PLAIN_2])
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, vnd=False, is_list=True, attributes=[EXAMPLE_PLAIN_2, EXAMPLE_PLAIN],
check_ordering=True)
assert f'Attribute Value incorrect ' in str(err.value)
def test_plain_json_attributes_list_wrong_ordering_size(self):
response = self.build_response([EXAMPLE_PLAIN, EXAMPLE_PLAIN_2])
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, vnd=False, is_list=True, check_ordering=True,
attributes=[EXAMPLE_PLAIN, EXAMPLE_PLAIN_2, EXAMPLE_PLAIN])
assert 'Error: more attributes supplied than available in response. 3 found asserted 2' in str(err.value)
def test_plain_json_attributes_list_nested_missing(self):
response = self.build_response([EXAMPLE_PLAIN, EXAMPLE_PLAIN_2])
invalid_attributes = EXAMPLE_PLAIN.copy()
invalid_attributes['owner'] = EXAMPLE_PLAIN['owner'].copy()
invalid_attributes['owner']['new_field'] = 'test'
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, vnd=False, is_list=True, attributes=invalid_attributes)
assert f'{invalid_attributes} NOT IN ' in str(err.value)
def test_plain_json_attributes_list_nested_mismatch(self):
response = self.build_response([EXAMPLE_PLAIN, EXAMPLE_PLAIN_2])
invalid_attributes = EXAMPLE_PLAIN.copy()
invalid_attributes['owner'] = EXAMPLE_PLAIN['owner'].copy()
invalid_attributes['owner']['id'] = 'test'
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, vnd=False, is_list=True, attributes=invalid_attributes)
assert f'{invalid_attributes} NOT IN ' in str(err.value)
def test_plain_json_list_count(self):
response = self.build_response([EXAMPLE_PLAIN, EXAMPLE_PLAIN_2])
AssertionHelper.HTTP_200(response, vnd=False, is_list=True, count=2,
attributes=[EXAMPLE_PLAIN, EXAMPLE_PLAIN_2])
def test_plain_json_list_wrong_count(self):
response = self.build_response([EXAMPLE_PLAIN, EXAMPLE_PLAIN_2])
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, vnd=False, attributes=[EXAMPLE_PLAIN, EXAMPLE_PLAIN_2], is_list=True,
count=1)
assert 'Difference in count of response_data, got 2 expected 1' in str(err.value)
def test_plain_json_single_count(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, count=1)
assert f'Count is only checked when response is list' in str(err.value)
def test_vnd_meta(self, vnd_single):
response = self.build_response(vnd_single)
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
meta={
'key': 'value',
'other_key': 'other_value'
},
))
def test_vnd_meta_mismatch(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
meta={
'key': 'different value'
},
))
assert 'Meta field `key` had value `value` not `different value` as expected.' in str(err.value)
def test_vnd_meta_invalid_key(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
meta={
'invalid_key': 'value'
},
))
assert 'Meta field `invalid_key` not found' in str(err.value)
def test_vnd_no_meta(self, vnd_single):
vnd_single['data'].pop('meta')
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
meta={
'key': 'value'
},
))
assert 'Meta missing' in str(err.value)
def test_vnd_invalid_meta_format(self, vnd_single):
response = self.build_response(vnd_single)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=AssertionHelper.EntityRef(
resource=EXAMPLE_RESOURCE['type'],
meta=[{
'key': 'value'
}],
))
assert 'Invalid format for meta data <class \'list\'>, must be dict' in str(err.value)
def test_vnd_meta_list(self, vnd_list, entity_ref_1):
entity_ref_1.meta = {
'key': 'value',
'other_key': 'other_value'
}
response = self.build_response(vnd_list)
AssertionHelper.HTTP_200(response, entity_refs=entity_ref_1, is_list=True)
def test_vnd_list_meta_mismatch(self, vnd_list, entity_ref_1):
response = self.build_response(vnd_list)
entity_ref_1.meta = {
'key': 'different value'
}
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=entity_ref_1, is_list=True)
assert 'Meta field `key` had value `value` not `different value` as expected.' in str(err.value)
def test_vnd_list_meta_invalid_key(self, vnd_list, entity_ref_1):
entity_ref_1.meta = {
'invalid_key': 'value'
}
response = self.build_response(vnd_list)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=entity_ref_1, is_list=True)
assert 'Meta field `invalid_key` not found' in str(err.value)
def test_vnd_list_no_meta(self, vnd_list, entity_ref_3):
entity_ref_3.meta = {
'key': 'value',
'other_key': 'other_value'
}
response = self.build_response(vnd_list)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=entity_ref_3, is_list=True)
assert 'Meta missing' in str(err.value)
def test_vnd_list_invalid_meta_format(self, vnd_list, entity_ref_1):
entity_ref_1.meta = [{
'invalid_key': 'value'
}]
response = self.build_response(vnd_list)
with pytest.raises(AssertionError) as err:
AssertionHelper.HTTP_200(response, entity_refs=entity_ref_1, is_list=True)
assert 'Invalid format for meta data <class \'list\'>, must be dict' in str(err.value)
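
# A speculative sketch of the `build_response` helper used throughout these
# tests. The real fixture is defined earlier in this file and may differ;
# the assumption here is that it wraps a payload in a DRF-style Response
# with the given status code (names below are hypothetical):
def _build_response_sketch(data=None, status=200):
    from rest_framework.response import Response
    return Response(data=data, status=status)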
| 43.109181 | 119 | 0.645446 | 6,001 | 52,119 | 5.329945 | 0.039327 | 0.074848 | 0.064311 | 0.093794 | 0.922401 | 0.890136 | 0.864874 | 0.847116 | 0.815476 | 0.797874 | 0 | 0.02927 | 0.255339 | 52,119 | 1,208 | 120 | 43.144868 | 0.794852 | 0 | 0 | 0.632265 | 0 | 0.004008 | 0.122719 | 0.010054 | 0 | 0 | 0 | 0 | 0.324649 | 1 | 0.11022 | false | 0 | 0.00501 | 0.007014 | 0.128257 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
70a6686e03770b9d7201c01ddf75200dc2471804 | 33 | py | Python | neureca/explainer/__init__.py | hojinYang/neureca | b1eb7246b731b7a0c7264b47c1c27239b9fe1224 | [
"Apache-2.0"
] | 7 | 2021-08-24T14:34:33.000Z | 2021-12-10T12:43:50.000Z | neureca/explainer/__init__.py | hojinYang/neureca | b1eb7246b731b7a0c7264b47c1c27239b9fe1224 | [
"Apache-2.0"
] | null | null | null | neureca/explainer/__init__.py | hojinYang/neureca | b1eb7246b731b7a0c7264b47c1c27239b9fe1224 | [
"Apache-2.0"
] | 1 | 2021-09-10T17:50:38.000Z | 2021-09-10T17:50:38.000Z | from .explainer import Explainer
| 16.5 | 32 | 0.848485 | 4 | 33 | 7 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121212 | 33 | 1 | 33 | 33 | 0.965517 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
5609d55ac540176069d9eab4ee6e0cefe98d642f | 18 | py | Python | AGC2021_submit/seareale/odach/__init__.py | seareale/AGC2021_object-detection | a32d1302e9c5b372047faad3924b72ea1e3fc35a | [
"MIT"
] | 25 | 2020-10-29T05:42:44.000Z | 2022-02-10T23:40:14.000Z | AGC2021_submit/seareale/odach/__init__.py | seareale/AGC2021_object-detection | a32d1302e9c5b372047faad3924b72ea1e3fc35a | [
"MIT"
] | 15 | 2020-10-21T02:24:57.000Z | 2021-07-13T19:27:47.000Z | AGC2021_submit/seareale/odach/__init__.py | seareale/AGC2021_object-detection | a32d1302e9c5b372047faad3924b72ea1e3fc35a | [
"MIT"
] | 2 | 2020-11-13T18:03:55.000Z | 2021-06-30T08:58:48.000Z | from .oda import * | 18 | 18 | 0.722222 | 3 | 18 | 4.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 18 | 1 | 18 | 18 | 0.866667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
569789979d2e770479b8a978c02b632d2ef0b475 | 186 | py | Python | Ocular/model.py | adi-797/Iris-and-Pupil-detection-using-OpenCV | 2421a25ff99ca67f213c05fbf482c49e7c443881 | [
"MIT"
] | null | null | null | Ocular/model.py | adi-797/Iris-and-Pupil-detection-using-OpenCV | 2421a25ff99ca67f213c05fbf482c49e7c443881 | [
"MIT"
] | null | null | null | Ocular/model.py | adi-797/Iris-and-Pupil-detection-using-OpenCV | 2421a25ff99ca67f213c05fbf482c49e7c443881 | [
"MIT"
] | null | null | null | import cv2
import numpy as np


class model:
    @staticmethod
    def bilLevels(img):
        print("bil")

    @staticmethod
    def cholLevels(img):
        print("chol")  # assumed fix: the original printed "bil" here, an apparent copy-paste slip

    @staticmethod
    def catLevels(img):
        print("cat")  # assumed fix: the original printed "bil" here, an apparent copy-paste slip
| 14.307692 | 25 | 0.526882 | 22 | 186 | 4.454545 | 0.636364 | 0.244898 | 0.336735 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008264 | 0.349462 | 186 | 12 | 26 | 15.5 | 0.801653 | 0 | 0 | 0.375 | 0 | 0 | 0.051724 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.375 | false | 0 | 0.125 | 0 | 0.625 | 0.375 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
3b12fd21642cf2bdb2102659e8df588d41c1a540 | 11,198 | py | Python | src/data_loader.py | SRI-CSL/Trinity | 3bc01fa6a6dc5a3e783f5ce1ccd61b4fe1ea5998 | [
"MIT"
] | 1 | 2021-04-27T01:35:45.000Z | 2021-04-27T01:35:45.000Z | src/data_loader.py | Tubbz-alt/Trinity-1 | 3bc01fa6a6dc5a3e783f5ce1ccd61b4fe1ea5998 | [
"MIT"
] | 1 | 2021-08-06T20:25:47.000Z | 2021-08-09T14:17:49.000Z | src/data_loader.py | Tubbz-alt/Trinity-1 | 3bc01fa6a6dc5a3e783f5ce1ccd61b4fe1ea5998 | [
"MIT"
] | 1 | 2020-12-16T09:53:21.000Z | 2020-12-16T09:53:21.000Z | import torch
import sklearn.datasets as sklearn_datasets
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import os
import numpy as np
from models import DataGenerator  # re-enabled: the 'blob' branch of getNonTargetDataSet constructs a DataGenerator
torch.manual_seed(25)
np.random.seed(1000)
def getSVHN(batch_size, TF, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'svhn-data'))
num_workers = kwargs.setdefault('num_workers', 1)
kwargs.pop('input_size', None)
# NB: target_transform below is defined but never passed to datasets.SVHN;
# torchvision's SVHN loader already remaps label 10 to 0, so this is dead code.
def target_transform(target):
new_target = target - 1
if new_target == -1:
new_target = 9
return new_target
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.SVHN(
root=data_root, split='train', download=True,
transform=TF,
),
batch_size=batch_size, shuffle=False, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.SVHN(
root=data_root, split='test', download=True,
transform=TF,
),
batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
def getCIFAR10(batch_size, TF, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'cifar10-data'))
num_workers = kwargs.setdefault('num_workers', 1)
kwargs.pop('input_size', None)
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(
root=data_root, train=True, download=True,
transform=TF),
batch_size=batch_size, shuffle=False, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(
root=data_root, train=False, download=True,
transform=TF),
batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
def getCIFAR100(batch_size, TF, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'cifar100-data'))
num_workers = kwargs.setdefault('num_workers', 1)
kwargs.pop('input_size', None)
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root=data_root, train=True, download=True,
transform=TF),
batch_size=batch_size, shuffle=False, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root=data_root, train=False, download=True,
transform=TF),
batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
def get_indices(dataset,class_num, num_oods):
indices = []
count = 0
for i in range(len(dataset.targets)):
for j in range(len(class_num)):
if dataset.targets[i] == class_num[j] and count < num_oods:
indices.append(i)
count = count + 1
return indices
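
# Example (illustrative values): gather at most 100 samples whose label is
# 3 or 7 and hand the indices to a SubsetRandomSampler, as getSubsetCIFAR100
# does below. Note that num_oods caps the total across all listed classes
# combined, not per class:
#
#     idx = get_indices(dataset, class_num=[3, 7], num_oods=100)
#     sampler = torch.utils.data.sampler.SubsetRandomSampler(idx)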
# for getting a subset of CIFAR100 containing data for specific classes with the class ids specified in idx
def getSubsetCIFAR100(batch_size, TF, class_labels, num_oods, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'cifar100-data'))
num_workers = kwargs.setdefault('num_workers', 1)
kwargs.pop('input_size', None)
dataset = datasets.CIFAR100(root=data_root, train=True, download=True,transform=TF)
#print(dataset.classes, dataset.class_to_idx)
class_indices = get_indices(dataset, class_labels, num_oods)
ds = []
if train:
train_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size, shuffle=False, sampler = torch.utils.data.sampler.SubsetRandomSampler(class_indices), **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size, shuffle=False, sampler = torch.utils.data.sampler.SubsetRandomSampler(class_indices), **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
def getGerman(batch_size, TF, data_root="datasets/GTSRB-Train/Final_Training/Images", train=True, val=True, **kwargs):
num_workers = kwargs.setdefault('num_workers', 1)
kwargs.pop('input_size', None)
ds = []
if train:
dataset = GermanTrafficData(root="datasets/GTSRB-Train/Final_Training/Images", img_size=32, train=True)
train_loader = DataLoader(dataset,
batch_size= batch_size,
shuffle = False,
drop_last=False)
ds.append(train_loader)
if val:
dataset = GermanTrafficData(root="datasets/GTSRB-Test/Final_Test/Images", img_size=32, train=False)
test_loader = DataLoader(dataset,
batch_size= batch_size,
shuffle = False,
drop_last=False)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
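
# getGerman above references `GermanTrafficData`, which is never imported in
# this file. The stand-in below keeps the module self-contained; it is only a
# sketch under assumptions (class-id subfolders of .ppm images for the train
# split). The project's real class, defined elsewhere, may differ; for
# example, GTSRB test labels actually come from a CSV, which this sketch
# does not read.
import glob
from PIL import Image
from torch.utils.data import Dataset

class GermanTrafficData(Dataset):
    def __init__(self, root, img_size=32, train=True):
        self.paths = sorted(glob.glob(os.path.join(root, '**', '*.ppm'), recursive=True))
        self.train = train
        self.tf = transforms.Compose([
            transforms.Resize((img_size, img_size)),
            transforms.ToTensor(),
        ])

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, idx):
        img = Image.open(self.paths[idx]).convert('RGB')
        if self.train:
            # GTSRB training folders are named by class id, e.g. "00042".
            label = int(os.path.basename(os.path.dirname(self.paths[idx])))
        else:
            label = -1  # placeholder: test labels live in a separate CSV
        return self.tf(img), label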
def getTargetDataSet(data_type, batch_size, input_TF, dataroot, **kwargs):  # **kwargs added: the 'subset_cifar100' branch reads kwargs['idx'] and kwargs['num_oods']
if data_type == 'cifar10':
train_loader, test_loader = getCIFAR10(batch_size=batch_size, TF=input_TF, data_root=dataroot, num_workers=1)
elif data_type == 'cifar100':
train_loader, test_loader = getCIFAR100(batch_size=batch_size, TF=input_TF, data_root=dataroot, num_workers=1)
elif data_type == 'svhn':
train_loader, test_loader = getSVHN(batch_size=batch_size, TF=input_TF, data_root=dataroot, num_workers=1)
elif data_type == 'subset_cifar100':
print("Out_idx: ", kwargs['idx'])
train_loader, test_loader = getSubsetCIFAR100(batch_size=batch_size, TF=input_TF, class_labels=kwargs['idx'], num_oods=kwargs['num_oods'], data_root=dataroot, num_workers=1)
# for idx, (data, target) in enumerate(test_loader):
elif data_type == 'german':
train_loader, test_loader = getGerman(batch_size=batch_size, TF=input_TF, data_root=dataroot, num_workers=1)
elif data_type == 'imagenet_resize':
dataroot = os.path.expanduser(os.path.join(dataroot, 'Imagenet_resize'))
testsetout = datasets.ImageFolder(dataroot, transform=input_TF)
test_loader = torch.utils.data.DataLoader(testsetout, batch_size=batch_size, shuffle=False, num_workers=1)
train_loader = test_loader
elif data_type == 'lsun_resize':
dataroot = os.path.expanduser(os.path.join(dataroot, 'LSUN_resize'))
testsetout = datasets.ImageFolder(dataroot, transform=input_TF)
test_loader = torch.utils.data.DataLoader(testsetout, batch_size=batch_size, shuffle=False, num_workers=1)
train_loader = test_loader
'''
elif data_type == 'toy_data': # half_moon dataset
data,target = sklearn_datasets.make_moons(100*2,noise=.05, random_state=200, shuffle=False)
data = data.astype(np.float32)
data = torch.from_numpy(data)
target = torch.from_numpy(target)
train_loader = DataGenerator(data,target,batch_size=batch_size)
test_data,test_target = sklearn_datasets.make_moons(200*2,noise=0.05,random_state=500, shuffle=False)
test_data = test_data.astype(np.float32)
test_data = torch.from_numpy(test_data)
test_target = torch.from_numpy(test_target)
test_loader = DataGenerator(test_data,test_target,batch_size=batch_size)
elif data_type == 'blob': # blob dataset as OODs for half_moon toy dataset. This is called from ADV_Samples.py for LID scores
test_data, test_target = sklearn_datasets.make_blobs(n_samples=[120,60,30],centers = [[0,2],[1.95,2],[0.5,0.25]],cluster_std=[0.1,0.03,0.02],shuffle=False,random_state=200)
test_data = test_data.astype(np.float32)
test_data = torch.from_numpy(test_data)
test_target = torch.from_numpy(test_target)
test_loader = DataGenerator(test_data,test_target,batch_size=batch_size)
train_loader = test_loader
'''
return train_loader, test_loader
def getNonTargetDataSet(data_type, batch_size, input_TF, dataroot, **kwargs):
print("data_type: ", data_type)
if data_type == 'cifar10':
_, test_loader = getCIFAR10(batch_size=batch_size, TF=input_TF, data_root=dataroot, num_workers=1)
elif data_type == 'svhn':
_, test_loader = getSVHN(batch_size=batch_size, TF=input_TF, data_root=dataroot, num_workers=1)
elif data_type == 'cifar100':
_, test_loader = getCIFAR100(batch_size=batch_size, TF=input_TF, data_root=dataroot, num_workers=1)
elif data_type == 'subset_cifar100':
print("kwargs ", kwargs)
_, test_loader = getSubsetCIFAR100(batch_size=batch_size, TF=input_TF, class_labels=kwargs['idx'], num_oods=kwargs['num_oods'], data_root=dataroot, num_workers=1)
# for idx, (data, target) in enumerate(test_loader):
elif data_type == 'german':
_, test_loader = getGerman(batch_size=batch_size, TF=input_TF, data_root=dataroot, num_workers=1)
elif data_type == 'imagenet_resize':
dataroot = os.path.expanduser(os.path.join(dataroot, 'Imagenet_resize'))
testsetout = datasets.ImageFolder(dataroot, transform=input_TF)
test_loader = torch.utils.data.DataLoader(testsetout, batch_size=batch_size, shuffle=False, num_workers=1)
elif data_type == 'lsun_resize':
dataroot = os.path.expanduser(os.path.join(dataroot, 'LSUN_resize'))
testsetout = datasets.ImageFolder(dataroot, transform=input_TF)
test_loader = torch.utils.data.DataLoader(testsetout, batch_size=batch_size, shuffle=False, num_workers=1)
elif data_type == 'blob': # blob dataset as OODs for half_moon toy dataset
test_data, test_target = sklearn_datasets.make_blobs(n_samples=[120,60,30],centers = [[0,2],[1.95,2],[0.5,0.25]],cluster_std=[0.1,0.03,0.02],shuffle=False,random_state=200) #all three orig
#test_data, test_target = sklearn_datasets.make_blobs(n_samples=[120,60,30],centers = [[0,1.6],[1.95,2],[0.5,0.25]],cluster_std=[0.1,0.03,0.02],shuffle=True,random_state=200) #all three
#test_data, test_target = sklearn_datasets.make_blobs(n_samples=[120],centers = [[0.5,0.25]],cluster_std=[0.02],shuffle=False,random_state=200) #knn
#test_data, test_target = sklearn_datasets.make_blobs(n_samples=[120],centers = [[1.95,2]],cluster_std=[0.03],shuffle=False,random_state=200) #top right
print(test_target)
test_data = test_data.astype(np.float32)
test_data = torch.from_numpy(test_data)
test_target = torch.from_numpy(test_target)
test_loader = DataGenerator(test_data,test_target,batch_size=batch_size)
return test_loader
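
if __name__ == '__main__':
    # Smoke-test sketch (assumes network access: torchvision downloads
    # CIFAR-10 into dataroot on first use).
    tf = transforms.Compose([transforms.ToTensor()])
    train_loader, test_loader = getTargetDataSet('cifar10', batch_size=128,
                                                 input_TF=tf,
                                                 dataroot='/tmp/public_dataset/pytorch')
    images, labels = next(iter(train_loader))
    print(images.shape, labels.shape)  # torch.Size([128, 3, 32, 32]) torch.Size([128])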
| 48.267241 | 196 | 0.672977 | 1,517 | 11,198 | 4.736981 | 0.10679 | 0.078903 | 0.054551 | 0.070136 | 0.817144 | 0.782215 | 0.779989 | 0.7484 | 0.7484 | 0.746451 | 0 | 0.028284 | 0.210663 | 11,198 | 231 | 197 | 48.47619 | 0.784704 | 0.073585 | 0 | 0.642045 | 0 | 0 | 0.069353 | 0.025209 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051136 | false | 0 | 0.034091 | 0 | 0.136364 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
3b55ae56addc6fc5b99392ec4da1d6e7abad2af7 | 14,809 | py | Python | agent/agent.py | NixonZ/QNetwork-RL | acf34dd8d598104267da88f3eacc3e44f06265a7 | [
"MIT"
] | 1 | 2021-07-17T14:49:51.000Z | 2021-07-17T14:49:51.000Z | agent/agent.py | NixonZ/QNetwork-RL | acf34dd8d598104267da88f3eacc3e44f06265a7 | [
"MIT"
] | null | null | null | agent/agent.py | NixonZ/QNetwork-RL | acf34dd8d598104267da88f3eacc3e44f06265a7 | [
"MIT"
] | null | null | null | import torch
from torch.jit.frontend import NotSupportedError
import torch.nn as nn
from torch_geometric.nn import MessagePassing
from torch_geometric.data import Data
import numpy as np
def get_default_device():
"""Pick GPU if available, else CPU"""
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
device = get_default_device()
class MPNN(MessagePassing):
def __init__(self,node_embedding_dim,edge_embedding_dim,hidden_node_dim,mode = 'forward'):
super(MPNN,self).__init__(aggr="add",flow = 'source_to_target' if mode == 'forward' else 'target_to_source',node_dim=0)
p = node_embedding_dim[0]
b = node_embedding_dim[1]
self.reduce = nn.Sequential(
nn.Conv2d(2,10,(p,1),padding=(p,0)),
nn.Conv2d(10,50,(p,b//2+1)),
nn.Conv2d(50,5,(p,b//4),stride=(1,b//16)),
nn.Flatten(start_dim=1),
nn.Linear(5*3*5, hidden_node_dim)
)
self.node_data = nn.ModuleList( [ nn.Linear(hidden_node_dim + edge_embedding_dim,b) for _ in range(p) ] )
def forward(self,x,edge_attr,edge_index):
'''
x : [|V|, p, b] (one p-by-b embedding per node)
edge_attr : [|E|] (one scalar per edge; the `unsqueeze(-1)` in message() implies edge_embedding_dim == 1)
edge_index : [2,|E|]
'''
return self.propagate(edge_index, x = x, edge_attr = edge_attr)
def message(self,x_i,x_j,edge_attr):
temp = torch.cat((x_i.unsqueeze(1),x_j.unsqueeze(1)),dim=1)
x = self.reduce.forward(temp)
x = torch.cat((x,edge_attr.unsqueeze(-1)),dim=1)
temp = []
for i in range(self.node_data.__len__()):
temp.append(self.node_data[i].forward(x).unsqueeze(1))
return torch.cat(temp,dim=1).to(device)
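
# Standalone shape check for the `reduce` CNN above, assuming p = 4 and b = 16
# (b must be divisible by 16 for the strided conv to yield the 5*3*5 = 75
# features the final Linear expects). Purely illustrative sizes:
def _reduce_shape_check(p=4, b=16, hidden_node_dim=8, num_edges=7):
    reduce = nn.Sequential(
        nn.Conv2d(2, 10, (p, 1), padding=(p, 0)),            # [E,2,p,b] -> [E,10,2p+1,b]
        nn.Conv2d(10, 50, (p, b // 2 + 1)),                  # -> [E,50,p+2,b//2]
        nn.Conv2d(50, 5, (p, b // 4), stride=(1, b // 16)),  # -> [E,5,3,5]
        nn.Flatten(start_dim=1),
        nn.Linear(5 * 3 * 5, hidden_node_dim),
    )
    pair = torch.randn(num_edges, 2, p, b)  # (x_i, x_j) stacked on the channel dim
    out = reduce(pair)
    assert out.shape == (num_edges, hidden_node_dim)
    return out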
class Graph_Representation(nn.Module):
def __init__(self,node_embedding_dim,edge_embedding_dim,hidden_node_dim,graph_dim = 50,prop_steps = 2):
super(Graph_Representation,self).__init__()
p = node_embedding_dim[0]
b = node_embedding_dim[1]
# Message Passing Layers
self.prop_steps = prop_steps
self.forward_message = MPNN(node_embedding_dim,edge_embedding_dim,hidden_node_dim,mode='forward')
self.backward_message = MPNN(node_embedding_dim,edge_embedding_dim,hidden_node_dim,mode='backward')
# self.MPN_list = nn.ModuleList( [ MPNN(node_embedding_dim,edge_embedding_dim,hidden_node_dim) for _ in range(prop_steps) ] )
# Node Dimensionality reduction
self.reduce = nn.Sequential(
nn.Conv2d(1,10,(p,1),padding=(p,0)),
nn.Conv2d(10,50,(p,b//2+1)),
nn.Conv2d(50,5,(p,b//4),stride=(1,b//16)),
nn.Flatten(start_dim=1),
nn.Linear(5*3*5, hidden_node_dim)
)
# Learning Graph representation Layers
self.gm = nn.Linear(hidden_node_dim,graph_dim)
self.fm = nn.Linear(hidden_node_dim,graph_dim)
def forward(self,batch:Data):
'''
batch : Batch
'''
edge_index = batch.edge_index
edge_attr = batch.edge_attr
for i in range(self.prop_steps):
x = batch.x
batch.x = self.forward_message.forward(x,edge_attr,edge_index) + self.backward_message.forward(x,edge_attr,edge_index)
x = self.reduce(batch.x.unsqueeze(1))
g = torch.sigmoid(self.gm(x))
h_v_G = self.fm(x)
h_G = torch.sum( g * h_v_G , dim = 0 )
return h_G
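
# The readout above is a gated sum, h_G = sum_v sigmoid(gm(h_v)) * fm(h_v).
# A tiny standalone check of the shapes, with illustrative sizes only:
def _gated_readout_check(hidden_node_dim=8, graph_dim=50, n_nodes=5):
    gm = nn.Linear(hidden_node_dim, graph_dim)
    fm = nn.Linear(hidden_node_dim, graph_dim)
    x = torch.randn(n_nodes, hidden_node_dim)
    h_G = torch.sum(torch.sigmoid(gm(x)) * fm(x), dim=0)
    assert h_G.shape == (graph_dim,)
    return h_G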
class Agent(nn.Module):
def __init__(self,agent_type,node_embedding_dim,M,edge_embedding_dim,hidden_node_dim,graph_dim = 50,prop_steps = 2):
super(Agent,self).__init__()
p = node_embedding_dim[0]
b = node_embedding_dim[1]
self.M = M # Max num of servers.
self.agent_type = agent_type
self.f_G_actionvalue = Graph_Representation(node_embedding_dim,edge_embedding_dim,hidden_node_dim,graph_dim,prop_steps)
self.f_G_policy = Graph_Representation(node_embedding_dim,edge_embedding_dim,hidden_node_dim,graph_dim,prop_steps)
if agent_type == "add node":
'''
k ∈ [0,M]
xk ∈ R^(pxb)
'''
self.policy_network = nn.ModuleList( [ nn.Sequential(nn.Linear(graph_dim+1,b),nn.Softplus(beta=0.1)) for _ in range(p) ] )
self.action_value = nn.Linear(graph_dim+1+b*p,1)
elif agent_type == "add edge":
'''
k ∈ [1,2^n-1]
xk ∈ R
'''
self.policy_network = nn.Sequential(nn.Linear(graph_dim+1,1),nn.Softplus(beta=0.1))
self.action_value = nn.Linear(graph_dim+1+1,1)
elif agent_type == "edit nodes":
'''
k ∈ [1,n]
xk ∈ R^(pxb)
'''
self.policy_network = nn.ModuleList( [ nn.Sequential(nn.Linear(graph_dim+1,b),nn.Softplus(beta=0.1)) for _ in range(p) ] )
self.action_value = nn.Linear(graph_dim+1+b*p,1)
elif agent_type == "edit weights":
'''
k ∈ {(i,j)|i<j}
xk ∈ R
'''
self.policy_network = nn.Sequential(nn.Linear(graph_dim+2,1),nn.Sigmoid())
self.action_value = nn.Linear(graph_dim+2+1,1)
else:
raise NotSupportedError
self.theta_param = nn.ModuleList( [self.f_G_policy, self.policy_network] )
self.w_param = nn.ModuleList( [self.f_G_actionvalue,self.action_value] )
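        # theta_param collects the policy ("actor") parameters and w_param the
        # action-value ("critic") parameters, presumably so the two groups can
        # be optimized separately during training.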
def action(self,batch: Data,done):
if done:
n = batch.num_nodes
else:
n = batch.num_nodes - 1
h_G_actionvalue = self.f_G_actionvalue.forward(batch)
h_G_policy = self.f_G_policy.forward(batch)
max_Q = torch.tensor(-1.0*np.inf)
optimal_k = None
optimal_xk = None
if self.agent_type == "add node":
'''
k ∈ [0,M]
xk ∈ R^(pxb)
'''
for k in range(self.M+1):
k_ = torch.tensor([k]).to(device)
x = torch.cat([h_G_policy,k_])
temp = []
for i in range(self.policy_network.__len__()):
temp.append(self.policy_network[i].forward(x).unsqueeze(0))
xk = torch.cat(temp,dim=0)
x = torch.cat([h_G_actionvalue,k_,xk.flatten()])
Q = self.action_value.forward(x)
if Q > max_Q:
max_Q = Q
optimal_k = k
optimal_xk = xk
elif self.agent_type == "add edge":
'''
k ∈ [1,2^n-1]
xk ∈ R
'''
for k in range(0,2**n-2):
k_ = torch.tensor([k]).to(device)
x = torch.cat([h_G_policy,k_])
xk = self.policy_network.forward(x)
x = torch.cat([h_G_actionvalue,k_,xk])
Q = self.action_value.forward(x)
if Q > max_Q:
max_Q = Q
optimal_k = k
optimal_xk = xk
elif self.agent_type == "edit nodes":
'''
k ∈ [1,n]
xk ∈ R^(pxb)
'''
if not(done):
n += 1
for k in range(n):
k_ = torch.tensor([k]).to(device)
x = torch.cat([h_G_policy,k_])
temp = []
for i in range(self.policy_network.__len__()):
temp.append(self.policy_network[i].forward(x).unsqueeze(0))
xk = torch.cat(temp,dim=0)
x = torch.cat([h_G_actionvalue,k_,xk.flatten()])
Q = self.action_value.forward(x)
if Q > max_Q:
max_Q = Q
optimal_k = k
optimal_xk = xk
elif self.agent_type == "edit weights":
'''
k ∈ {(i,j)|i<j}
xk ∈ R:[0,1]
'''
if not(done):
n += 1
for i in range(n-1):
for j in range(i+1,n):
k = [i,j]
k_ = torch.tensor(k).to(device)
x = torch.cat([h_G_policy,k_])
xk = self.policy_network.forward(x)
x = torch.cat([h_G_actionvalue,k_,xk])
Q = self.action_value.forward(x)
if Q > max_Q:
max_Q = Q
optimal_k = k
optimal_xk = xk
else:
raise NotSupportedError
return max_Q,optimal_k,optimal_xk
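    # Summary of the four action spaces enumerated above (per the docstrings;
    # note the "add edge" loop actually iterates k in [0, 2^n - 3], which does
    # not match its docstring's k in [1, 2^n - 1]):
    #   "add node":     k in [0, M],        xk in R^(p x b)
    #   "add edge":     k over edge codes,  xk in R (edge weight)
    #   "edit nodes":   k in [0, n-1],      xk in R^(p x b)
    #   "edit weights": k = (i, j), i < j,  xk in [0, 1]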
def rn_action(self, batch: Data, k):  # k is an int index, or an (i, j) pair when agent_type == "edit weights"
h_G_actionvalue = self.f_G_actionvalue.forward(batch)
h_G_policy = self.f_G_policy.forward(batch)
if self.agent_type == "add node":
'''
k ∈ [0,M]
xk ∈ R^(pxb)
'''
k_ = torch.tensor([k]).to(device)
x = torch.cat([h_G_policy,k_])
temp = []
for i in range(self.policy_network.__len__()):
temp.append(self.policy_network[i].forward(x).unsqueeze(0))
xk = torch.cat(temp,dim=0)
x = torch.cat([h_G_actionvalue,k_,xk.flatten()])
Q = self.action_value.forward(x)
elif self.agent_type == "add edge":
'''
k ∈ [1,2^n-1]
xk ∈ R
'''
k_ = torch.tensor([k]).to(device)
x = torch.cat([h_G_policy,k_])
xk = self.policy_network.forward(x)
x = torch.cat([h_G_actionvalue,k_,xk])
Q = self.action_value.forward(x)
elif self.agent_type == "edit nodes":
'''
k ∈ [1,n]
xk ∈ R^(pxb)
'''
k_ = torch.tensor([k]).to(device)
x = torch.cat([h_G_policy,k_])
temp = []
for i in range(self.policy_network.__len__()):
temp.append(self.policy_network[i].forward(x).unsqueeze(0))
xk = torch.cat(temp,dim=0)
x = torch.cat([h_G_actionvalue,k_,xk.flatten()])
Q = self.action_value.forward(x)
elif self.agent_type == "edit weights":
'''
k = (i, j), i < j
xk ∈ R : [0, 1]
'''
k_ = torch.tensor(k).to(device)
x = torch.cat([h_G_policy,k_])
xk = self.policy_network.forward(x)
x = torch.cat([h_G_actionvalue,k_,xk])
Q = self.action_value.forward(x)
else:
raise NotSupportedError
return xk,Q
def action_value_calc(self,batch : Data,k,xk):
h_G_actionvalue = self.f_G_actionvalue.forward(batch)
if self.agent_type == "add node":
'''
k ∈ [0,M]
xk ∈ R^(pxb)
'''
k_ = torch.tensor([k]).to(device)
x = torch.cat([h_G_actionvalue,k_])
x = torch.cat([x,xk.flatten()])
Q = self.action_value.forward(x)
elif self.agent_type == "add edge":
'''
k ∈ [1,2^n-1]
xk ∈ R
'''
x = torch.cat([h_G_actionvalue,torch.tensor([k]).to(device)])
x = torch.cat([x,xk])
Q = self.action_value.forward(x)
elif self.agent_type == "edit nodes":
'''
k ∈ [1,n]
xk ∈ R^(pxb)
'''
x = torch.cat([h_G_actionvalue,torch.tensor([k]).to(device)])
x = torch.cat([x,xk.flatten()])
Q = self.action_value.forward(x)
elif self.agent_type == "edit weights":
'''
k = (i, j), i < j
xk ∈ R : [0, 1]
'''
x = torch.cat([h_G_actionvalue,torch.tensor(k).to(device)])
x = torch.cat([x,xk])
Q = self.action_value.forward(x)
else:
raise NotSupportedError
return Q
def Q_hat(self,batch: Data,done):
if done:
n = batch.num_nodes
else:
n = batch.num_nodes - 1
with torch.no_grad():
h_G_actionvalue = self.f_G_actionvalue.forward(batch)
h_G_policy = self.f_G_policy.forward(batch)
Q_sum = torch.zeros((1),device=device,dtype=torch.float64)
if self.agent_type == "add node":
'''
k ∈ [0,M]
xk ∈ R^(pxb)
'''
for k in range(self.M+1):
k_ = torch.tensor([k]).to(device)
x = torch.cat([h_G_policy,k_])
temp = []
for i in range(self.policy_network.__len__()):
temp.append(self.policy_network[i].forward(x).unsqueeze(0))
xk = torch.cat(temp,dim=0)
x = torch.cat([h_G_actionvalue,k_,xk.flatten()])
Q_sum += self.action_value.forward(x)
elif self.agent_type == "add edge":
'''
k ∈ [1,2^n-1]
xk ∈ R
'''
for k in range(0,2**n-2):
k_ = torch.tensor([k]).to(device)
x = torch.cat([h_G_policy,k_])
xk = self.policy_network.forward(x)
x = torch.cat([h_G_actionvalue,k_,xk])
Q_sum += self.action_value.forward(x)
elif self.agent_type == "edit nodes":
'''
k ∈ [1,n]
xk ∈ R^(pxb)
'''
if not(done):
n += 1
for k in range(n):
k_ = torch.tensor([k]).to(device)
x = torch.cat([h_G_policy,k_])
temp = []
for i in range(self.policy_network.__len__()):
temp.append(self.policy_network[i].forward(x).unsqueeze(0))
xk = torch.cat(temp,dim=0)
x = torch.cat([h_G_actionvalue,k_,xk.flatten()])
Q_sum += self.action_value.forward(x)
elif self.agent_type == "edit weights":
'''
k ∈ {(i,j)|i<j}
xk ∈ R
'''
if not(done):
n += 1
for i in range(n-1):
for j in range(i+1,n):
k = [i,j]
k_ = torch.tensor(k).to(device)
x = torch.cat([h_G_policy,k_])
xk = self.policy_network.forward(x)
x = torch.cat([h_G_actionvalue,k_,xk.flatten()])
Q_sum += self.action_value.forward(x)
else:
raise NotSupportedError
return Q_sum | 31.575693 | 134 | 0.49605 | 1,954 | 14,809 | 3.553224 | 0.07523 | 0.047242 | 0.042777 | 0.040328 | 0.761918 | 0.74982 | 0.728648 | 0.704739 | 0.70013 | 0.695809 | 0 | 0.017182 | 0.37511 | 14,809 | 469 | 135 | 31.575693 | 0.728766 | 0.024917 | 0 | 0.70412 | 0 | 0 | 0.019977 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041199 | false | 0.007491 | 0.022472 | 0 | 0.108614 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |