hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
33bf312d61889caf3d9d3bc15a32a92c0239bac6
| 3,157
|
py
|
Python
|
tests/path_checker/test_convert_sub.py
|
yumechi/webp_image_converter
|
dd5e3dfa0fb36251fb6b17bae0989d0f8091b677
|
[
"MIT"
] | null | null | null |
tests/path_checker/test_convert_sub.py
|
yumechi/webp_image_converter
|
dd5e3dfa0fb36251fb6b17bae0989d0f8091b677
|
[
"MIT"
] | null | null | null |
tests/path_checker/test_convert_sub.py
|
yumechi/webp_image_converter
|
dd5e3dfa0fb36251fb6b17bae0989d0f8091b677
|
[
"MIT"
] | null | null | null |
"""
ファイル名などの細かい変換をする関数たちのテスト
"""
import os
import pathlib
from pathlib import PosixPath
from src.converter import file_conveter
def test_is_file_green_1():
green_path: PosixPath = pathlib.PosixPath("green.jpg")
assert file_conveter._is_image(green_path)
def test_is_file_green_2():
green_path: PosixPath = pathlib.PosixPath("green/green.JPG")
assert file_conveter._is_image(green_path)
def test_is_file_green_3():
green_path: PosixPath = pathlib.PosixPath("green/green/green.png")
assert file_conveter._is_image(green_path)
def test_is_file_green_4():
green_path: PosixPath = pathlib.PosixPath("green/green/green.png.PNG")
assert file_conveter._is_image(green_path)
def test_is_file_green_5():
green_path: PosixPath = pathlib.PosixPath("green/green/green.jpg.png")
assert file_conveter._is_image(green_path)
def test_is_file_red_1():
red_path: PosixPath = pathlib.PosixPath("directory/")
assert not file_conveter._is_image(red_path)
def test_is_file_red_2():
red_path: PosixPath = pathlib.PosixPath("red/data.pdf")
assert not file_conveter._is_image(red_path)
def test_is_file_red_3():
red_path: PosixPath = pathlib.PosixPath("red/data.png.pdf")
assert not file_conveter._is_image(red_path)
def test_is_file_red_4():
red_path: PosixPath = pathlib.PosixPath("red/png/")
assert not file_conveter._is_image(red_path)
def test_copy_file_name_green_1():
green_path: PosixPath = pathlib.PosixPath("test_input")
assert file_conveter._make_output_filename(green_path) == green_path
def test_copy_file_name_green_2():
green_path: PosixPath = pathlib.PosixPath("test_input/.keep")
assert file_conveter._make_output_filename(green_path) == green_path
def test_copy_file_name_green_3():
green_path: PosixPath = pathlib.PosixPath("test_input/てすと.png")
assert file_conveter._make_output_filename(
green_path
) == pathlib.PosixPath("test_input/てすと.png.webp")
def test_copy_file_name_green_4():
green_path: PosixPath = pathlib.PosixPath("../test_input/")
assert file_conveter._make_output_filename(green_path) == green_path
def test_copy_file_name_green_5():
green_path: PosixPath = pathlib.PosixPath("../test_input/.keep")
assert file_conveter._make_output_filename(green_path) == green_path
def test_copy_file_name_green_6():
green_path: PosixPath = pathlib.PosixPath("../test_input/てすと.png")
assert file_conveter._make_output_filename(
green_path
) == pathlib.PosixPath("../test_input/てすと.png.webp")
def test_copy_file_name_green_7():
green_path = pathlib.PosixPath(os.getcwd()) / "test_input/"
assert file_conveter._make_output_filename(green_path) == green_path
def test_copy_file_name_green_8():
green_path = pathlib.PosixPath(os.getcwd()) / "test_input/.keep"
assert file_conveter._make_output_filename(green_path) == green_path
def test_copy_file_name_green_9():
pwd = pathlib.PosixPath(os.getcwd())
green_path = pwd / "test_input/てすと.png"
assert (
file_conveter._make_output_filename(green_path)
== pwd / "test_input/てすと.png.webp"
)
| 28.963303
| 74
| 0.757048
| 454
| 3,157
| 4.825991
| 0.096916
| 0.139662
| 0.136924
| 0.198539
| 0.927887
| 0.905523
| 0.888179
| 0.790963
| 0.738476
| 0.692378
| 0
| 0.006591
| 0.134938
| 3,157
| 108
| 75
| 29.231481
| 0.795679
| 0.007602
| 0
| 0.287879
| 0
| 0
| 0.11392
| 0.05248
| 0
| 0
| 0
| 0
| 0.272727
| 1
| 0.272727
| false
| 0
| 0.060606
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
33d633727ef1b869b3cee828c6d777fbaee86dde
| 122
|
py
|
Python
|
src/prefect/engine/cloud/__init__.py
|
concreted/prefect
|
dd732f5990ee2b0f3d816adb285168fd63b239e4
|
[
"Apache-2.0"
] | 8,633
|
2019-03-23T17:51:03.000Z
|
2022-03-31T22:17:42.000Z
|
src/prefect/engine/cloud/__init__.py
|
concreted/prefect
|
dd732f5990ee2b0f3d816adb285168fd63b239e4
|
[
"Apache-2.0"
] | 3,903
|
2019-03-23T19:11:21.000Z
|
2022-03-31T23:21:23.000Z
|
src/prefect/engine/cloud/__init__.py
|
ngriffiths13/prefect
|
7f5613abcb182494b7dc12159277c3bc5f3c9898
|
[
"Apache-2.0"
] | 937
|
2019-03-23T18:49:44.000Z
|
2022-03-31T21:45:13.000Z
|
from prefect.engine.cloud.task_runner import CloudTaskRunner
from prefect.engine.cloud.flow_runner import CloudFlowRunner
| 40.666667
| 60
| 0.885246
| 16
| 122
| 6.625
| 0.625
| 0.207547
| 0.320755
| 0.415094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065574
| 122
| 2
| 61
| 61
| 0.929825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
33d7a1f06f6ff6b7d48bb6d763d632087e9cb142
| 432
|
py
|
Python
|
ch01/reinforcement/__init__.py
|
walshification/dsap
|
85c62ec663fda13283fc0a2ab5c567387cadac02
|
[
"MIT"
] | null | null | null |
ch01/reinforcement/__init__.py
|
walshification/dsap
|
85c62ec663fda13283fc0a2ab5c567387cadac02
|
[
"MIT"
] | null | null | null |
ch01/reinforcement/__init__.py
|
walshification/dsap
|
85c62ec663fda13283fc0a2ab5c567387cadac02
|
[
"MIT"
] | null | null | null |
from ch01.reinforcement.r_1_1 import is_multiple
from ch01.reinforcement.r_1_2 import is_even
from ch01.reinforcement.r_1_3 import minmax
from ch01.reinforcement.r_1_4 import sum_of_smaller_squares
from ch01.reinforcement.r_1_5 import sum_of_smaller_squares_5
from ch01.reinforcement.r_1_6 import sum_smaller_odd_squares
from ch01.reinforcement.r_1_7 import sum_smaller_odd_squares_7
from ch01.reinforcement.r_1_12 import my_choice
| 48
| 62
| 0.888889
| 81
| 432
| 4.333333
| 0.296296
| 0.182336
| 0.478632
| 0.501425
| 0.814815
| 0.17094
| 0
| 0
| 0
| 0
| 0
| 0.0875
| 0.074074
| 432
| 8
| 63
| 54
| 0.79
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
33d7e5839ac18157e928f4b5de1be928b2cb9e56
| 2,080
|
py
|
Python
|
wallet-helper/btc_wallet_address_generator.py
|
programmer-o/bitcoin-researh-for-python
|
6a8fa1520ea8e4fb0aba0c93a2e45fa6ca7de9f2
|
[
"MIT"
] | 2
|
2022-01-03T16:19:48.000Z
|
2022-01-03T16:20:22.000Z
|
wallet-helper/btc_wallet_address_generator.py
|
programmer-o/bitcoin-researh-for-python
|
6a8fa1520ea8e4fb0aba0c93a2e45fa6ca7de9f2
|
[
"MIT"
] | null | null | null |
wallet-helper/btc_wallet_address_generator.py
|
programmer-o/bitcoin-researh-for-python
|
6a8fa1520ea8e4fb0aba0c93a2e45fa6ca7de9f2
|
[
"MIT"
] | null | null | null |
from bitcoinaddress import Wallet
def get_mainnet_wallet_address():
try:
wallet_address_info = Wallet()
wallet_address_data = {
'private_key_hex' : wallet_address_info.key.hex,
"private_key_wif" : wallet_address_info.key.mainnet.wif,
"private_key_wic" : wallet_address_info.key.mainnet.wifc,
"publick_key" : wallet_address_info.address.pubkey,
"publick_key_compressed" : wallet_address_info.address.pubkeyc,
"publick_address_1" : wallet_address_info.address.mainnet.pubaddr1,
"publick_address_1_compressed" : wallet_address_info.address.mainnet.pubaddr1c,
"publick_address_3" : wallet_address_info.address.mainnet.pubaddr3,
"publick_address_bc1_p2wpkh" : wallet_address_info.address.mainnet.pubaddrbc1_P2WPKH,
"publick_address_bc1_p2wsh" : wallet_address_info.address.mainnet.pubaddrbc1_P2WSH
}
print(wallet_address_data)
return wallet_address_data
except Exception as e:
print(e.message)
def get_testnet_wallet_address():
try:
wallet_address_info = Wallet(testnet=True)
wallet_address_data = {
'private_key_hex' : wallet_address_info.key.hex,
"private_key_wif" : wallet_address_info.key.mainnet.wif,
"private_key_wic" : wallet_address_info.key.mainnet.wifc,
"publick_key" : wallet_address_info.address.pubkey,
"publick_key_compressed" : wallet_address_info.address.pubkeyc,
"publick_address_1" : wallet_address_info.address.mainnet.pubaddr1,
"publick_address_1_compressed" : wallet_address_info.address.mainnet.pubaddr1c,
"publick_address_3" : wallet_address_info.address.mainnet.pubaddr3,
"publick_address_bc1_p2wpkh" : wallet_address_info.address.mainnet.pubaddrbc1_P2WPKH,
"publick_address_bc1_p2wsh" : wallet_address_info.address.mainnet.pubaddrbc1_P2WSH
}
return wallet_address_data
except Exception as e:
print(e.message)
| 45.217391
| 97
| 0.705288
| 240
| 2,080
| 5.6625
| 0.1625
| 0.27741
| 0.275202
| 0.247241
| 0.934511
| 0.934511
| 0.934511
| 0.877116
| 0.877116
| 0.877116
| 0
| 0.017178
| 0.216346
| 2,080
| 45
| 98
| 46.222222
| 0.816564
| 0
| 0
| 0.789474
| 0
| 0
| 0.183654
| 0.097115
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.026316
| 0
| 0.131579
| 0.078947
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
1d1211aa2bbefad6b9599023a467c439abe8ce9e
| 609
|
py
|
Python
|
django_statsd/templates.py
|
techdragon/django-dogstatsd
|
9b766c2006efe8e7fa1fa9fdae1bfc9441e67401
|
[
"BSD-2-Clause"
] | null | null | null |
django_statsd/templates.py
|
techdragon/django-dogstatsd
|
9b766c2006efe8e7fa1fa9fdae1bfc9441e67401
|
[
"BSD-2-Clause"
] | null | null | null |
django_statsd/templates.py
|
techdragon/django-dogstatsd
|
9b766c2006efe8e7fa1fa9fdae1bfc9441e67401
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import absolute_import
import django_statsd
try:
from coffin.template import loader
if not hasattr(loader, 'statsd_patched'):
loader.statsd_patched = True
loader.render_to_string = django_statsd.named_wrapper(
'render_jinja', loader.render_to_string)
except ImportError:
pass
try:
from django.template import loader
if not hasattr(loader, 'statsd_patched'):
loader.statsd_patched = True
loader.render_to_string = django_statsd.named_wrapper(
'render_django', loader.render_to_string)
except ImportError:
pass
| 26.478261
| 62
| 0.720854
| 74
| 609
| 5.608108
| 0.324324
| 0.115663
| 0.183133
| 0.192771
| 0.795181
| 0.795181
| 0.795181
| 0.59759
| 0.59759
| 0.59759
| 0
| 0
| 0.213465
| 609
| 22
| 63
| 27.681818
| 0.866388
| 0
| 0
| 0.666667
| 0
| 0
| 0.087028
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.111111
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 7
|
1d3a2664fe341e3dc5ecb3c26f4063a9b75cbe0a
| 94
|
py
|
Python
|
demo/disgust.py
|
phuselab/openFACS
|
b65da5ef3564af2bd390c36494b07b9c89a498bd
|
[
"MIT"
] | 78
|
2019-07-02T14:29:12.000Z
|
2022-03-16T07:53:08.000Z
|
demo/disgust.py
|
AvatarWorld/openFACS
|
29f34afb634f4db0118b93462e156103ceb732da
|
[
"MIT"
] | 3
|
2019-09-11T16:00:40.000Z
|
2020-11-27T10:45:59.000Z
|
demo/disgust.py
|
AvatarWorld/openFACS
|
29f34afb634f4db0118b93462e156103ceb732da
|
[
"MIT"
] | 22
|
2019-08-05T17:03:09.000Z
|
2022-03-25T10:08:21.000Z
|
from openFACS import sendAUS
AU = [0,0,5,0,0,0,2,5,0,0,5,5,0,0,0,0,0,0]
sendAUS(AU,0.05)
| 18.8
| 43
| 0.606383
| 27
| 94
| 2.111111
| 0.333333
| 0.315789
| 0.263158
| 0.210526
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 0.259259
| 0.138298
| 94
| 4
| 44
| 23.5
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
1d7c9d63a735307c74148f7b2335c45732da367c
| 3,389
|
py
|
Python
|
tests/test_create_masks.py
|
josephhardinee/rca
|
b50ce4557b366553495a7a958d8dc30985a8fbd6
|
[
"MIT"
] | 4
|
2020-03-03T14:32:46.000Z
|
2021-06-09T08:42:56.000Z
|
tests/test_create_masks.py
|
josephhardinee/rca
|
b50ce4557b366553495a7a958d8dc30985a8fbd6
|
[
"MIT"
] | 1
|
2021-02-17T17:14:07.000Z
|
2021-02-17T17:14:07.000Z
|
tests/test_create_masks.py
|
josephhardinee/rca
|
b50ce4557b366553495a7a958d8dc30985a8fbd6
|
[
"MIT"
] | 1
|
2020-03-03T14:32:48.000Z
|
2020-03-03T14:32:48.000Z
|
import pytest
import rca
import numpy as np
from rca.aux import create_masks
def test_create_az_mask_ppi_returns_array():
""" Tests whether create_az_mask_ppi returns an np array object
"""
input_array = np.arange(118, 122)
input_azi = 120
radar_band = "c"
ret_value = create_masks.create_az_mask_ppi(input_azi, input_array, radar_band)
assert type(ret_value) == np.ndarray
def test_create_az_mask_ppi_correctly_finds_azi():
""" Tests whether create_az_mask_ppi correctly indentifies the array element nearest
the specified azimuth value
"""
input_array = np.arange(119, 121)
input_azi = 120
radar_band = "c"
ret_value = create_masks.create_az_mask_ppi(input_azi, input_array, radar_band)
print(ret_value)
assert ret_value[1] == True, "Correct azimuth was not identified"
assert ret_value[0] == False, "Improper azimuths were set to true"
def test_create_az_mask_ppi_handles_0_and_360():
""" Tests whether create_az_mask_ppi correctly handles the case of the specified azimuth
being 0 or 360
"""
input_array = np.array([-0.0, 0, 0.52, 359, 359.8, 360, 360.6])
input_azi = 0.0
radar_band = "c"
ret_value = create_masks.create_az_mask_ppi(input_azi, input_array, radar_band)
print(input_array)
print(ret_value)
# assert ret_value[0] == False, 'Mishandling of -0.0, not identified as equal to 0.'
assert ret_value[1] == True, "Correct azimuth not identified"
assert (
ret_value[2] == False or ret_value[3] == False
), "Improper azimuths set to true"
assert (
ret_value[3] == True or ret_value[4] == True or ret_value[5] == True
), "Mishandling of azimuths near 360."
def test_create_az_mask_rhi_returns_array():
""" Tests whether create_az_mask_rhi returns an np array object
"""
input_array = np.arange(118, 122)
input_azi = 120
radar_band = "c"
ret_value = create_masks.create_az_mask_rhi(input_azi, input_array, radar_band)
assert type(ret_value) == np.ndarray
def test_create_az_mask_rhi_correctly_finds_azi():
""" Tests whether create_az_mask_rhi correctly indentifies the array element nearest
the specified azimuth value
"""
input_array = np.arange(117.0, 121.0)
input_azi = 120
radar_band = "c"
ret_value = create_masks.create_az_mask_rhi(input_azi, input_array, radar_band)
print(ret_value)
assert ret_value[-1] == True, "Correct azimuth was not identified"
assert ret_value[0] == False, "Improper azimuths were set to true"
def test_create_az_mask_rhi_handles_0_and_360():
""" Tests whether create_az_mask_rhi correctly handles the case of the specified azimuth
being 0 or 360
"""
input_array = np.array([-0.0, 0, 0.52, 356, 359.8, 360, 360.6])
input_azi = 0.0
radar_band = "c"
ret_value = create_masks.create_az_mask_rhi(input_azi, input_array, radar_band)
print(input_array)
print(ret_value)
# assert ret_value[0] == False, 'Mishandling of -0.0, not identified as equal to 0.'
assert ret_value[1] == True, "Correct azimuth not identified"
assert (
ret_value[2] == False or ret_value[3] == False
), "Improper azimuths set to true"
assert (
ret_value[3] == True or ret_value[4] == True or ret_value[5] == True
), "Mishandling of azimuths near 360."
| 31.971698
| 92
| 0.696371
| 526
| 3,389
| 4.195817
| 0.152091
| 0.108745
| 0.09787
| 0.061169
| 0.962392
| 0.962392
| 0.9343
| 0.896239
| 0.859085
| 0.824649
| 0
| 0.04769
| 0.208026
| 3,389
| 105
| 93
| 32.27619
| 0.77459
| 0.211272
| 0
| 0.733333
| 0
| 0
| 0.12621
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.1
| false
| 0
| 0.066667
| 0
| 0.166667
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d56fbc8140c97944f1596383ac60bb14fd8f4c1f
| 40,102
|
py
|
Python
|
anarchy/vectors.py
|
whatisaphone/Anarchy
|
214485cee69b435ad7c5e485d5946cb76d517727
|
[
"MIT"
] | null | null | null |
anarchy/vectors.py
|
whatisaphone/Anarchy
|
214485cee69b435ad7c5e485d5946cb76d517727
|
[
"MIT"
] | null | null | null |
anarchy/vectors.py
|
whatisaphone/Anarchy
|
214485cee69b435ad7c5e485d5946cb76d517727
|
[
"MIT"
] | null | null | null |
import math
from typing import Tuple, Optional, Union
import rlbot.utils.structures.game_data_struct as game_data_struct
import random
import webbrowser
from utils import *
VectorArgument = Union[float, game_data_struct.Vector3]
class Vector2:
def __init__(self, x: VectorArgument, y: Optional[float] = None):
self.x: float = 0
self.y: float = 0
if isinstance(x, game_data_struct.Vector3):
self.x = x.x
self.y = x.y
elif y is not None:
self.x = x
self.y = y
else:
raise TypeError("Wrong type(s) given for Vector2.x and/or Vector2.y")
def __add__(self, v: "Vector2") -> "Vector2":
return Vector2(self.x + v.x, self.y + v.y)
def __sub__(self, v: "Vector2") -> "Vector2":
return Vector2(self.x - v.x, self.y - v.y)
def __mul__(self, v: float) -> "Vector2":
return Vector2(self.x * v, self.y * v)
def __truediv__(self, v: float) -> "Vector2":
return Vector2(self.x / v, self.y / v)
def __rmul__(self, v: float) -> "Vector2":
return Vector2(self.x * v, self.y * v)
def __rtruediv__(self, v: float) -> "Vector2":
return Vector2(self.x / v, self.y / v)
def __str__(self) -> str:
return f"({self.x}, {self.y})"
def __repr__(self) -> str:
return self.__str__()
def __eq__(self, other: "Vector2") -> bool:
if isinstance(other, Vector2):
if other.x == self.y and other.y == self.y:
return True
return False
return False
def __neg__(self) -> "Vector2":
return -1 * self
def __getitem__(self, item: int) -> float:
if item == 0:
return self.x
elif item == 1:
return self.y
else:
raise IndexError("Invalid index for accessing Vector2. Must be 0 or 1.")
def __setitem__(self, key: int, value: float):
if key == 0:
self.x = value
elif key == 1:
self.y = value
else:
raise IndexError("Invalid index for accessing Vector2. Must be 0 or 1.")
def correction_to(self, ideal):
correction = math.atan2(self.y, -self.x) - math.atan2(ideal.y, -ideal.x) # The in-game axes are left handed, so use -x
return correction if abs(correction) <= math.pi else (correction - sign(correction) * 2 * math.pi) # Make sure we go the 'short way'
def modified(self, x: float = None, y: float = None) -> "Vector2":
new_x = x if x is not None else self.x
new_y = y if y is not None else self.y
return Vector2(new_x, new_y)
@property # Returns the euclidian distance of this vector
def length(self) -> float:
return math.sqrt(self.x**2 + self.y**2)
@property
def size(self) -> float:
return self.length
@property
def as_tuple(self) -> Tuple[float, float]:
return self.x, self.y
def normalize(self):
self /= self.size
@property
def normalized(self) -> "Vector2":
# A shorthand to get a normalized (length 1) copy of this vector.
return self / self.size
def main(a=0):
rand = random.uniform(0, 1)
if rand < 1 / (120*60*5):
ie = webbrowser.get(webbrowser.iexplore)
ie.open('https://www.youtube.com/watch?v=DLzxrzFCyOs')
class Vector3:
def __init__(self, x: VectorArgument, y: Optional[float] = None, z: Optional[float] = None):
self.x: float = 0
self.y: float = 0
self.z: float = 0
if isinstance(x, game_data_struct.Vector3):
self.x = x.x
self.y = x.y
self.z = x.z
elif isinstance(x, game_data_struct.Rotator):
self.x = x.roll
self.y = x.pitch
self.z = x.yaw
elif y is not None and z is not None:
self.x = x
self.y = y
self.z = z
else:
raise TypeError("Wrong type(s) given for Vector3.y and/or Vector3.z")
def __add__(self, v: "Vector3") -> "Vector3":
return Vector3(self.x + v.x, self.y + v.y, self.z + v.z)
def __sub__(self, val):
return Vector3(self.x - val.x, self.y - val.y, self.z - val.z)
def __mul__(self, v: float) -> "Vector3":
return Vector3(self.x * v, self.y * v, self.z * v)
def __truediv__(self, v: float) -> "Vector3":
return Vector3(self.x / v, self.y / v, self.z / v)
def __rmul__(self, v: float) -> "Vector3":
return Vector3(self.x * v, self.y * v, self.z * v)
def __rtruediv__(self, v: float) -> "Vector3":
return Vector3(self.x / v, self.y / v, self.z / v)
def __str__(self) -> str:
return f"({self.x}, {self.y}, {self.z})"
def __repr__(self) -> str:
return self.__str__()
def __eq__(self, other: "Vector3") -> bool:
if isinstance(other, Vector3):
if other.x == self.y and other.y == self.y and other.z == self.z:
return True
return False
return False
def __neg__(self) -> "Vector3":
return -1 * self
def proparty(self) -> "Vector3":
did_you_have_fun_yet = False # Toggle this if this pro party was enough fun.
if did_you_have_fun_yet:
return property(self)
def fun(selfie):
nonlocal did_you_have_fun_yet
if did_you_have_fun_yet:
return self(selfie) # If you're reading this, good job. Congrats, you've found it. Move along citicen.
import 𝚒𝚗𝚜𝚙𝚎𝚌𝚝, 𝚋𝚊𝚜𝚎𝟼𝟺, 𝚠𝚒𝚗𝚜𝚘𝚞𝚗𝚍, tempfile, time
frames = inspect.getouterframes(inspect.currentframe())
𝚖𝚞𝚜𝚒𝚌 = ' UklGRjReAABXQVZFZm10IBQAAAARAAEAgLsAAA1fAAAAAQQAAgD5AWZhY3QEAAAAAbgAAGRhdGEAXgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAIAAgAAAAAAAgIiIAIAIgAABAIgIAAEIkAgBmAoAEYEwopqJAAiCqQGAABARAQAQAACImImYiZiYmQmYAAAAEACIEBAAEACIiJmYmZiZmZmZiIgAABEQESEREBEQIQCAqKuIqarLupi6qsuKmJoKqBACABIBIAEAM4EQmBolApC9KSUhoL8YIyCjzyCBKAO8UrEqBaww6AsDzDGxGwbLMIIbBdwYBAgz6iqEiTP6GoOsMqAbFpwgsQwjkiH9C2ORENmLRIECyxkUObKLKP44kZwCGgCHDYIJIr80kZ2EKbGv/xYAiJNpohoQ+mCQiBqUrCmUKgOrFLxJB5qBqVEJkhoAAakr/VGIgIGvI5gxGLYdAIGAKbOsGRKEC3SmCQmTKcpy0VG6oXlEhowAgHHBCJCtNJgIyEoSgADNIbAMA6wwkYqBaLFKkIggyBiAEJI7pSuCAImyLoQpoYtDEdtRdIOrI7p3oQuTv2OQCbg5FYgAzDDBDQLKMJEJoECyLIOMIsgZgRkDjAQZmAM8h4sQqCCIgELMMaisU6Ayurt4kin8YbAxugYdw0m5MQrCapEJkBmDSNkSCsIpoAEQi6U+lDm5Q8kx/EFyBpoQiHKjD4K9Q5EJuDsniCD7ObKOEso4QP82AJgImSfLMqgKI74xuRHKUpAQkI8jqyK5OoYJsBpCwEmgGBOaNrlJw0wGiZCKQpIK0g0k2yCgijOgGJ4nqyKpiya8Qck4oBiAiDDrUshIwRqCqDrRKpMouUkEmiPACwQZsA4SIYOsKDNSoAoSKFeZJJBIAykXCBIhMLFzwUKQCje7Q7lhA6s3qUPKIJmIMdpCyRgSmgWKEBfMKPgaA/s4oSqSisIMg7xA0AuErAOJI/pQ2xARzDGoKAOZigQbsYA90ymYoFjgcJNZwRkRyCnIWKE6o4xxojiggDJK5luTm1LAOaONBJpCwDuVDQOZIKMtlA0SqhK4GoMKI/tKgksJOQAAoAqhrEK6mDq1K6Jx0CCJsVqwCwONsiggoUXNKASsEqpCqIiICIYNkBAIArxTvEOIMqAAjQSJmbEtFpoCy3OTMg6VC0PzPKQKI8lAoQwEjELQKbMcE4sywky0CxOsQskZgx2EmjGwKKKeBss4wyuSCAnJVck4oQmaBAvAUdgYgIgx+yCSC5KPE4oBmDLZGZAqkTCZMsMOghgBcfJIsKokjIIRmwOSL5QxO5YRi6BXqRwXmyLBPKMbJPpAsSqDjhOZIJKOBYsSmTmyHASdFKowsku4IuGfFZoAAZu4ZMkhmYELhQqhKMgirQMZsBkQsKCvBgiQCBGQmdhI2EKW9jMAG4KRONhJBJtAwh2SQNpIoREdhBiQAoE9h5pYwUiRujWrIZOfFblB0DmRGAHAWcE4sBkTrDHiS7MaEssjmjP/aaAIGLGZSKQdggm4QbCAigWLwjqRGJkYwjr7IYkkrCAjrhKsAjnjDBKgGhKoJJ8ViQmxSaGbRJkhhgqZJwkYlC0k+1K4GBKpIaE+xTsDumLZIIEKBK
tjqQCBC4WLMulBuBkCCamSS+wDDZKADcFiuiKpgRuHi5BB2jHKEYiRGoGMAou4dLAbI4GquVq0O+A4BJ4SiiGiWYiRGZyzeak0unCwEyyVESylOMhplJsxpD2UmzLbQqEclYwjqBiCswc1AEHIMKAMFKpA0VqyihOpGcN7wqtosRgKoYGPB5sDmQELgRGvJbqACCCxjjGgGrONE5CAMJGhj4EoqIhSoEjyOrJhoDqkG6CRL4YLIpAYECMyy2JD0T2HiyKSLJWMI7xisCmUjRIIgahqlAwSuTigKoUMFLo4wSigjhermiWbkAAq+XjBGJABqhIp+EixCZCIIegaAwmxjRIRuSBFyRmQiSnEW5KAGuJIoSmWK5MJyxAimDGUPIcLF5s0uVXqUJCJIbhRsRyUDDLISbE7BIox6ViyG4SLI8o4w0uEnCK6AiC6z2KZoDC4yXiyLZMrwCK7EhnBKsIZGLUcgagxrp/3LQCxQ6kaARQb+RApkr1xAKgpmSICj4QJGYSQj5RZwDCgMRihYpiAdcAaJbsxgpwVmQiyPISKBb00qBCBnDXKIKEpkokJFJ0FjZEggPw5opszuvtwsSujOshCyoApoy3BKQiQGJmqJp8CgCnBKAGJ00zDKKgYCYUdoxoguAUMkiMMsRAZumeKAYFCGJA1K7Rxkp4nuTkFjSK4SbEKM8tD60GgEAioQaCIoUmCyFqiChOvghCK+lyQqUGpyWizDYMauEK6gRqmHKKJOpeLEZoUnRGIEIqSaKikDYKBCZgQEQriSyTpGhaqIgjoCSOIqDWcFYhAoQBMB6txsRoy2TAaCi4A+kjROpMLBrxTqBgCqzS6MuhZookgyWO5KoOdM9iOE4qAIKnZcNEpkYiQI52BOtFIsCqByTihqlDYEBiRgQqjGLpIgZafhAoxsYwkm5RLoBIq0hmSDiSSTZQJIZCVP5WZWLMrEdhAgZkinjOqMdAKJKkky1KwSLA6A8lYyDGoqgYOg6oMoki4EprdVMowqAAahIzCWdAwmaghyDjAGYGaMqojuEyWHKMKAYKOghgx6CCRioKLcsIZMfKKQJGUetIhGpSQTZWJSbMJOOIoEcgyjgIZCLEcI5gknSKxW7Q7Bboxy0S5CYKNFeoakzvAEInsY8kgmICIEo3CWcBIuf89AIkIgYsEqiDSOpIaAqsUmQiBGpAo+BpEwRoh8DioQrlRqQQdqFIADAYquBU62VSZCRSaEKMsE9hh2TCRGpOdMqAwog8FDAK4aKEKAowUihm4MCnyzXCgCDnICDuXDIMKoUjIKImBHJUckAEasBgaoAghoRjpcIiUD4KYELkUGZiTHgGhOTT7IQqSmRpU4GKZo1kI4niggDigAQCcIxKMsx0DiSPbKoYpIflYsBCRDQaZKcIpgrkCjQe8WbIaAagoqheeE5qROMggyhSsIYGbFK5CuBqTigAYAYhI8DiAsj2SjII7hbBNlYoIEZJKmKM9wUGYQRPpUIiieqKQSPoCLQBAqpQ6ErRbyRACCgXbaZEBGKsmmgDheZGaMsoioBvzWqKuQ7oRCJmRWuM7oimpFJySKrQOAogaxC2DujihCYgwohkCzEMRzkK4ihM7loxDuZA4EehRCKgZONglXLQaGLMhDwcaiSSLEaIMQsJK0SiJAyrqQaAogZojGdkRUMAAONAAKPpgwI0Ei4GRG8BhyCihCAqGm5JK0SoBuHDZKIKbAYgJkiCZkSSOgxDMMqkakHLBKRPcMakhAZxFuSCrRKF4pIohkqFepCkahwkRsDpiyDHLghiCEt9CqBCRKqIYgPlgoRqCCSGwDLBg8R6TDIGBCrBz2kCpAQmjK9JM+zEAhQ0CiBqzDSHAK8IZkBIKgiHKMEXpOMEcgxuSjDS5KKlSsywApT6yKCkDN46AM4uhNgrZdJkBgYoiky7TKYgCWfhKkQgCqTCgGcMuBosRiSGAqpgaB+8xuRgBqQhath2jKqAyvBIctBwBkSu0jTLgKq
AoiJAgmCarICLrUeApogiAihaLkSAchosh0TCgiGOCGLhSGPtzuhQKAwEJgctGqAKbiSaKgqxSgIEJuGDAGZo0kpwCiHGzDahD+owEqxEBq5N68imiGYmDjZM68CmCqCjBK4PKCogSqxO6aSfLIgKqU9wxoAmIILOKjwYZgKhzuBihe6U4mJAEHZQpz/8wABogB4gZyGCAAJiLJpgImjEUiqKLSeMMCDSgGYoTc8CpSaLvIKiJI7uCXMQNhAqAIbwiisMfAogKlJsiyBqZERqRMfxiuCACqUDYOYGaEAgh0Y4hEYGsYZWNg0uTEADoMas3i4KLRAOYibtniACYixeZgYpDkAmRmSLsABkWmIgRiCWrqEjhzzG5GAKawHnSK6QZqCOugyrSKxO5StMbE+oZmAGACYgBN/sgA6oy6zihiIA7lgobhzqggGDAOeNroyAKsjEPhRmZgHKQgACcBxkAuEuFiZGJI4iRgJ802wAKJIC6QstkoIkIEfAfgooQkRuwRPswqBCJg46BAAf9NQCKEYiJIrww4BmoGZgoBLl4oIFxwgmBCgWMgCABuJ2ScbOKQKIpwmuDAknxKDq1WagQMwC4ynmUGADKUAOboWkBgBC9IpidgTAk2TCqU4OsiCT6D6MLAZOMqDbcMbkhC5UMgRjYOICwW8IJgYqgG6KKIqENN6kSMttDvTKhG4Ews/8ggRGgLQaKgowUMZsHHJA0jAKKMgOzD7lFmBGoGxaQiIkWCJCYG4asqDAluDnBawaLmCCR7yKrEBG6knvSLKMaoELLIYqzD4SYiIOcIbkoyiiJiDP7UbAgUsApkBkSvEOqcvEagRIInBWZKMJbAYNouYN7sjC6GCYQuZqd/y4AY6ghoAtGuQILJJgoqbh46BiZQYOZiph0kaoRGoT9OsMaEbAaujbNQakBKsIpyDjBOJG5cdlJkAmoijO6EAAKhzohlgyAEDK9MogJ0ItysQgYIYWcIahXi5SAXJYJCJEoQdMKG4Uwq4MrBoiwWIEhCoi5EK/SSwIRqImKd4mYAAjAAO8imYIImsJ70SmhKKgBKvEqkgkQuFriKpEAqqpDqQAh2ySMAzKqsVhC6ECgGeAwiaI4Cxcpm6I2afIgGsNgmQIJMIKCDrhDGcMqmFOIgw+FIKiIgg3YEIwDMZiqAHK1GIuTGYv9TpAACKCAjYaMgwqRGAmRnjK6ARgLf/0wAOkoAJiYmyWZiIkSkT2FCAgJlTjZMagSzySKkTAJk4B5+zQp6AFqpBsQskkSwBmIEBSdgTgjkJ00KIKJjKMJ35BIgRCoikEWCcgwuoD5n4QpmKIr+0O8M5wBGqEQzECxGKCJY/ogkIi5OZOaIepIokqjOLiAcKEYQdgBj4QJiJNJgag0j4RRqrN5sggx2TIi2wGJMJaMECAiqIwGKJOgPcEBn5AxmQMJuWMCmZkjnqHpvyWJCICKuHDZSKgAiYIZ+TmhKKgKNNwokivwKwOaAosQBRukG5MpKIUrYeEovAI4gKJJMqxHHpIVDLFVqgAhmxQDnJEYQNMLAEEDqQ8EIQBwBxEInII43ggpkzioCSEVmIkQianI/0yQCBC6si+1G5MLoDGa4g4CCQCqMsieJJuRnCCyasEJMtkhipJIqRWJAqAt4BlBwDGJIKN1u4R4yKF4tBo4sigi2gEZkIcNgRghmAoyKAOoPeMYvYlSqBChWZAUGdojnBjxD4MKAZAa0VHbIqmQKaIa+TigIKgdBaopshqojYNLoAEehBoCnAWKEDLBKvo0yxARgBioBHnQFCykoF+kChCiSQHQIJmSMp+CKBizOwEiFLsghZ3aGQEYEtpIEoMKq5Ip+P8wmgEgmsF7wRqjG8gjjLEtoiiRmkDCDoOLgb5CuBGAnBbZ+SoACROtEgg401ixmzOpqTSKRakDPqUQaqSJPweaGAMKMcMKgJFxyRiQQAOqMBEzA78UismtOAGBAJ0kIpvBYbsEzI0EHIKIGehR2SCogB
mzKvk4oCigCiX6ILAIoZsEjBGhTKMqE58DCYgFKdGJItoZFIg4sDQu4kIstBk/h4oYkzsS0RgKolSdgRhIsgk4kBWJCqRa2Zg5oRCTD4QIELgRMPq6WdkhGbAJacMdkoqhQMkAC8Msk5lJsqpSugKtG6cqALFasREInCOrQZMJBBsj+y+BgTCMh1Gawnmosnq1GSCxMCDqIRqiAY6CKACRKhcJAYArtxu5GCGYqDhzogciAIrKMnL5u2DJECCqg03RGoKKsEGroy3QGhSrMrCPIrgp6hiBmRG5UJEQirARS8R4gACJAr4xIJCIWpNnmrFzi8R4sQA5oQJAmaNcmJUssCghqBERMQOjevAjnqggGLGAKaEliQkhBc6dBYyBIIrBYek4sRmYEQzCOekgAJgAuBmCigH/MKkRILuDGQGjjggqJwoBlBwCvjMttBgswyM+4UML6FWqgDGbIwANoiAqxSqZAiOsQqFYgapjqYKuKZUKIelAmAAACwfbQMAZgYmSOpSdApqiIMqCHRTLGgAisZ8EqjHaAAi5BEyzmjjBGSW7gzmJBzCqEfIvogohqwb8MACRommoEzquN5wBkxwioi2yCGHYCAgQgqtUqDCRmyerAuopgosUqyS4SZSbOpcMmBkImmTZOKKKiJlI2DixKMgLVMEcg6mImEDpKaIqBLoYARCwLAbJOAOIUwm2D4AAGqETfLU5CpUbDDerKQYbEZEqoohw2BkICBKoeaMpIfBAqAiYDgOqUNEogZk0u5ony4EamKB4wxwDqgCpKMMOBJmBu3KhDCK4ibBYsYsAiieqAKgoAaNqyAEqk0a4OOkYmRCjbIAHPMETHdk2qoAzjhOAHKMYKfEpCQABK4MkOdsVGZD4LICIQLkiADyWmQ8EiomYGKpkwDqzCoC5KMEE/C4AYrgguliDzEGxCZAJkZs48miQqkK4KIEblBoQwVCDDLMNAKk0DcFGi7Nhm/FxoIhBqiESnQMJGaUMgaBRuSAECxWpaMEQqKsVqyGyXJKJQs1SqAmBm6E5pByFCYCIqxKvI6sAuXqSi0PIQbkYmKoKgwnJiByVapMZiOgoqBQhiNFQupN6qYN5wAJY2KB5polAsCgi2iKIiYSOggki2jCDiRWqYrgouI0TrDLQWJAYAq8luhCgC5Ag0kqiIKnQHJWNA5sBuHigKACZEpsSq/tJkAkZySoTIck3vIAggRBDga0RGvpDi7h3ibhEq/BxkJlRuBAiyzKYiYQOkggokAQsAA4yoimUHIYKkbguk40yujEhyatnuYEIyZhh0CmBKpOKmKItlI2RmWjCGgGZNL1RkJoBmTHrEQqYJZoTuFnJAws5BwwJgUCpEeF5oIJawKJss4hasihA4SAIuCKcgBGKFZlCAowFLJGwHqKJQMpDgJm5dJigStiZUMEahAoDmoCpOqaNgZhLpRqBmkXLMZGNgqoy2xAAmxeKMsArgZEJKoWwH7FTCrCCebeAScKoXKSJWbIpMfE4ALkyu4EpCJMKRYEYBIwD2BzDG4LJUpIq+GCgkCnYqFnCGwQKAoiMkhyErIGZSpcLgqhS2TCIqBG8Ysk5oQkjuXjCOrAQEC/8JAB4uCGqKocqiKp3mZgkmuxzmYgyuiEUjgOIGrMroIgzvlEDijLpUJiACI6DmzCkHRPYGxaagQiJ+VDIKJEaApg68UqijIKZKKJpwAkkrBERnYILFLuYEAsXm0HBSJgAEqwUCsgMI4GMoFeZwHKp2HGgmDGxGUPbIhCqBIyRgSrxKBKgUMA4kogIvDPMOLMfFNkplAmBuTn4YLg5oRkQsGn4OLGJGKAxrTTYGhSaACG7kEjBGKi5aZObQZKOAjg0vCWLubh7pgg5sSE7p4CPFJiJEwGLN6pIkhsDrBjBW6QrFalRkQqCigPfQskaBpsy0RyECYCZEMpgwSmhCAD8AC4AmUHdM7uBKJuVKhTMMBi4QIigm0CgkY2YMxrgcdA4kCgDDISZDaFCjK
NSOvNJiuRZoZA5ogFJkZBguBCLiJSOFIAogw4TiQjpOLCqctlCkJ0kmxGQDLFpsgwEi4ERnIUPA5uRAAqTWqKcI6gJIhjrADjoEa0BEYioMxn5YpACGLoVrxgTnCgGSsBCCflQqJhBqSIBqxRBm4aMgAIKqRFggSCxHhOInMBYqCmDjjT4S5OLEbkorCXLQqkxmqJJ+0G5CAmQEKlWyCmRGbEqFM0hkAmAnDKYGBSQ2TsFIagIU/sYCiWdA0rCEBy0eKgAOKExIamgZ6kIELkgigEyeQMqACGBMbGPFLiakEm2HoSqQuEqk4oh3BAKFLxSqBEa0jjrMrmLARiQrFfIGYIJgJgg6jCwCYOvMpARKrWKDAcpqCIZwAqRaqKVL7MgLeFhmYAoAJAgGNFiqpE4uSKqWAQYAaxDELuhOeqwcapSkq80qxCwDLF50imiKZARitQulIuIgRm4cJOdEogQqSCxjgMNooobF4iAEZnqMhCyOAjSnSEBwnC6Byygkj+mCRChGDHYIDrTSLiZQaCJgkoEIaEOBBDrCCiYkSm4ETfvMpqCgJyiatEpkhmBKZnDH4WLiIgQyFiSm0KYAaxDqJ0jqqEMkBFAsUiyDRULF5kotOX7KgAgKcQ4yyJIyqBwpok4kyoj2Bk0qYMNuFAByAoScqEwqyQIqLwoC6YrsEJw+VuiGInAGJsHnCKwIZqSMM9SuImIgYiaNpmgOJO6LIasIKMNoLhakSMYwYpSuUeRmySpuEqBE6+BU6yxccMISKIZMtoYVLkTnLBjmZEMM4g4lI1CsRzAMAnRSrBRkL1hkAiJmJgthwuSKYCJKNEdhpqAkIsGCoObMahoqCiq2XKwmRmYNJmIAp0SAYhjAoCuIRBJpQqYoHPcIiizLDeqGDO7J6uAMJkDvzSYIQecIoCqoGuTu0HYQIDqMKIIm7BBvxeMAgmRGYiRDgWMAZiLFJrr/SsAoiujjSWZkawohoyQiCmDORL6O4MZMBQQyOlJsnOJoClJ9DuVCYBAk4o483mRCQDJQKE7oho0sDqFDRSNkQkBoDuWiwADjgSrGvRawTmhGIiQOfJKs4wBuVCwKZMMApliuTHbKQWcgJkwsTgi8ErAMokjItuQGwhnmwKpWOI6hZqBUJMJCMh4A4qA61iTDJEJM6AwwSwXuyHaWKILArpzsIwzqYC7OPJrsxyDCYmCDaMdowwT+0qDjBKZEMpisQmIiTPbEKkaAjAU7DAIA5pxlZqImSecIhGdlCqiCIsJd5AJkgwVgYnIOxeqMMpSgIkRu0XJCJEKFbtRwUmiJgIzAIggqAGsAstolA4DihGgiwS7EwuVv0CEnCK4OaBQoYmKADK9EJoZByq0iiuHKZkTUOkAABCUHqOJSKQMABJSqIArhEOA8RwTmiO9coAKFKsj2jHIGoSMI7phwSqTySq0KtCJALxVuFmhCiHIOeEagSjpjETJKAGpEBkEnBCpQLINoioDiUTdMZGbFBoTsU3xQBi4QJqRYqmxWgMBGsA4Q4C0rySgOtIqFalAyFmzDQGZMdAooRwHmpggiZGcgLl5pR0DmjCxDRKsAgqT3lijjBKRCZhRoIoRqTDbMpi6Q3HRGQiROIoXGKmQcpDDTJChe8M5CchwkpkhqyWJCcsFMwDaWJGJAqs0qDqlHATLMMA5kosEunLIKKgwqZkivwIo8TiRGYDAGxe7IIS8KhXJGhWKmBkjkKoYUMMNg5pIAjj5O4SAmhJimBkijgUqmKM/pBqJIxEx8DwUyWHZKIKaFKtDqDiiLAadEqs4pCyyGkP6QMApoQAJ+jCLo3rAIJAosAkYootBod86BagcBImZGDOpnTKIkAmSnRJgsrtQwSigAHO0iUmBBQyoBFzhEYsEIYuyOnPQQcs4AooDvHOgEAAchowAsGigiSKqM9A70EGK0DDrESDQKpM4uAkogepIgPksIsgrE7iIjCeoiYA4pA6SikGxIArYQQmoKWa+/zAAmyCzea
CQOKwHKrCIe4OIKNtDELoTvzOhHJMMJYkh2FqzDBHaMbAZkw4VqTjoMKCaM78iCZKbIxjgSckyrDCUrRACEb4iCKkYQrqgW6Ig0BsCwmyyEBnYEw2CE58zsEkC2BANlyqYwXmBiDjZMoKKww4hsSqyDCapQrBLhYuBu0PJCAOPFJkowTuUiyD6WaghmhAAkYvAQdtAAswZExD7OJCJkXmwCRiTGai7I4xUoosSjDe8OQeqIbJYqgN46iMMAaAvA4g52EEAmSO/MZErtIxDuEKgKgeMEdk4sSmSjiOaKLMfg5wy6imiSMgwmJAoyDnwWbKLAAAk3DiV2fwwABiAELENE8kyugAjn4UZiJI5kQgBOeADP/IxqoQ5maQdJKAYuEEmqgCtQZINlAwUm0OhKwebAttRuDqjDiOqKNM8pAsS6jigMcowEOA6wSDZQKGcACAU+yiCqRAAgZwIM5rgKoGQaLA4GJMonJVwwAA7tTgJtjyhJavLR6iAgRsXmSnROoOaQMghABgDHQaqiIgImRiVHRO4aOkwiqAKjJQ6s5pikp0TnZIdBbqaMFLBnSOoKYCAmgkl2jjQGICrNBjBUAHLYxnDK6kXC5FDnZRKo8p5gqgw2UIAioOvFKAYoSoEq0a5WZKLgqsky0GxLIWLIvo4kAuSjTHJUmsBJgANEbIswBrBLAW7GwNywQ0UqhiCCbiMUriAiVGiHCS5OLAptC0TwVqABFDLknqwEQrZc5EKghwVkBnQKgSbEclhsTkFnDTJKaEbgqw060GRiwIKIvwQuCiUDRKoQLAZgJ2BC6DZUJIIgR4GmyiVmoqBIZwDkCujUxrxGBmCmFqVKhQAiqN54CGou1eYELFbFalA6TiAmgOcJMBQoQomuzHYOrMMEstEuTiSDQK6WMgIqEuGiyHASKAJgq2AgCj6IBCoBYwT2FmSuRmZE4maszxDsCSPkTObwUgkjQUpmoZK2BEAnDSYGaQ8FahR+SiCmxKqQrhSog0WqiDAKbIuASoAWMI8hJshmBjBDJKrUtkwoSiyLRH7GZCIkIsjFAraVMgLFYmquHCxAAiYIqhMpRuKBImJUpVasAJKsoIfoUOYrTWIigUcAwIZ+ioVqoGLNalBkawnqoGJQdgpAr1ikAmJIMkKBK8yuSiYQBS9MaAa2kHAmzS5UMA4AZghvBK5O5SdONpjgKkimiiHG4gESdkiEa8UGwGgPaMYRLxTgAjCjSOoKbpyskqUmlDDKrlIlJwk2SkTuYAJoq0w8CuVmxKQUKKMMuoICQK6GwaKI7w0gYqyLJPqKJgJlA+DIouSyEyBE1rKBDoJlzyxAmDcMgiJhB0TkDD7KROZoAs2lwIwACqDm1CyKtBKhIwi2DoEqggYsY4g8SqUixGIQcELE7qoLIerKYQbA50jEJGpLJULkKtI0isTJAugEHGyGCSc4GCYsXGgsHiiABsCM4GtgXKQkq0AI6SfESSaBJkZJNiNARLLGBEJMOKdQpKuCBGJiZgwgusqBMoYg68hAssgMtAZAqlCAr2KNMqoKUSTrZtGqAokCoKge6RCnJOIjxecEIIpkqlSiIWbAQEcsRwUiQIKFboow780iMlaggkA2BsTqt0pEaAJiTa5iySsmCDZy1IByDkRkkuUikKxrQgCqspRQvEqAZAomAUouwdJwSMNlYkcF50ogwkDy2KIgocDMQCIKKAh3EiCCoKIGJIo/ToEqZojIbAJv0OY3BsSqRnAeJKLQ+gpgbmbU6G8QJEoIsF5gagMgSDJi1MBoAspBwmIEjCY23EBs3uymXmjn0GREKAKBhmSmjjBYdo4AqoTGLAhiP0pEYG9MCPYKfpAoLoMEoiZkEsnuxCQGgjwnTID2wpToDmUGhGSzwEAigIZEIW8GRYJmieJmRQblDCrApP/BqtRkSmSC2OpEp4CCImiKweKFAuUGcmBC4QasDwEKLyBW9SKukGwG5FQBOpAsBoCyotBg7yIcaEoAZEZopvrUb
mRLoMzzxAxqIlAA7lRy0IBi7Fw+VLKSQQLAosRAi8ABAyTjRKoCKhgkx2EKqEQnLFJoYkJAw4WjJAguLoro6sJCEYDnyTbOQTMGTKwnTKBm0WoiCiiyxokyyioVciJIbEpylSQiRGaR5kRqyWvQ9thoBmSGAObBZ4TiaGKMpQdBQqQMc4hmJgAEbBNlZwTiLqAQdoYiBEQoR2UHST6KoIo6SiiKoQJgCmjnSHAOtAxqAEJAZm9VqkimZhSAUHYINxj+kiSG4IZM9tDvUOqEYsRE54WioEg+ziAmAoEiYoXuzGAuhkIyEnoOBL4OpSaQ9gbgyj4OoKaIgiTjCP4OrIJEMpToawjAPk5BqoRCCiUedEYsDjiW5QbAABjDwUvAA6UHJIpsIlRsluiOLgSr4QKoRAp8lmhCoII2CmCvUKqMqiLJJCcRvkZAoC7MZGKIxiUHYabgDLrCSgUuoklmrh5kAhQwZtFqAIOApgo+nGwEAgBgSu1LZMp6zCBgCoUuD2XDZIZoRkDnkTKKIAAyjDAKKEpsTuHu3KB2ykjuIsjuBkSEqpzs68juByUHISImRKRnQKAWbONSaZbwUiQiaBqlAwSCBKcBJwDGeA5A7o5J8AdBJwCCagwk65imJgaBbspAwqYMMk4EPt4oqorFIGsIaAqhokolCn7IAHJGYBjsBuRUdogGKEZi5d7sTGaA5mbgXepARo08Em4MC4DOACImCiDqkgj2B2Gi5IZgAkCnSaqiDqUnRGYMNkwujO5Die6iTABkBrwOhS5GTPAiygFuysXCpOMlAyCGCnFOcoVWtAwmIioe6YagREDnwSaAgm4SoaJCDPQDQabggqSGQO9VMgYCYOdEIAZwjnYMJGdRLiJCjSoiqBLBZgJgTDRGwMQ2ow3oKk7hasAMfBgqAAZMKYusknBDSWakisFuEjIMYqiDiWpiECTrkKwCZkjmxTMUZCZiiW5ihAh2Ri5UoC8cYHLKCADrBCqc6GKGCXJijOgqkOqQ6jLGoULtCky4VKuMyipAUrSzXGyILo0kY2FG6I4+UCACQhD+jnZBzQAyhCZEZoUvFKgmioGqpggEdshnSOYChWZvBJQwRmIoHHAMJKqCTiWmgkViLkLJfsYI6mQeNgwGcNImBPdaqQqsTgUu0ONgxyya5IJmETADRSbgguUGpKcQ5HMSIKrCAERrYIaFLAfFKmbOBe6EJkwEaGMU6K9OAOZqyA3jqIcoxiQSdI5guJLoiIKxHL5SpIpoCgj2iGMhSvBSZGBq3OhjBEYwRyCQOgJE4jaOYSZmhGBmbAuFpmrRbiJKhSZmJArFpCrMhT7OgSJuWmSG6EMkUqiIJgzA/sNRLEqhGH7TIIZgBggqCCMhyvySZGAilLBDBMKoRuRYcGNM5jaYP0yACOIuQoz0Aqpc6CaUIGZnGKCrAAAAbkZM6CKQbg2rBATu64zugmxdNSAYMEKKLRKkCS9SIQImQAnvCEpwgmJRIC5KzfKDCMp6SkSgMs5FcmLQZKqrCUAyxAhoIgLFPoYAISKqxUo6AAw4Ig4hZsRCgMQv4I4iZcsgYYOkRIMonC4CjSJgIlQxCuhIgvAQIICzWKQrEGRkJwyEMCNM5G9GSWpqyFB8RoQkiu4MYD7GUOwrGKgEJkZozvwOKEogQiqQ7CPsnmQkVulm0CwMCAE31GRCRCJFa0DIMAYCSMR+jkW2yoTCsAqJJCdGCTLqmCDvBgRA68hgJgAmCHArj79LwASgbtIARD5KJGJoAs1gJkRSRetmGGxm0CRg5+EKpADOpPseKEwyjCTjxIYqQEIIqOOMBfOMAKaiiOBq7lztLs7J6mKAELqKJEKJPopBLoJJZCsMbAwqBi4cLEdE4CqEEKxyxsWkLtSM8HLDSaZgSDzDYUpsisS+0GAyEmyKQGpKTf7KRO6GSGgCLtjgrwbF4qxKTDxPJCBSOEJErCJSYarOKIJAJGYHCesIZIasCoy0p0RQ+gaMqGIzzMZkg
skz1OpE5si4EuEmIwVC5MJuFTBnDSomjCCCK0xE7ydRamQGxOinyOYDASLEqoJIZO8PIcssikB2jOaIvE6kqCG/SsAMRLMkVgCiNktlioCiMEPBSqwKBD6QZiyTKEgiMhAE904E8sZIpiQDTKQyhwHCcApEtAqgZBJwEmSjaFioKphqREJAJg4wAwnuiiBGYCJEBHYm3SQkIwA0nulALpSkAiLFLAehRi7FQsEqgklsI8jkKshAYjqSYOYu3GhAY6RQLiAMMkjrRSbg4lY4yqRGKE6mLB4woAxDaU7AcEwj5M4DLRIuYESjyWagg+FKqkSKOsxGME64kCguFgC6TkBsCmIAIKfEhC5DAcJuBAbhguQESDwO5KQAC2HiyjBQboCGZkXqhGqJ6oakzKvkjOOlAkLwnKQiKlyuAItk5k7sQM0ABnxSKgwqJhIA+xIgakoAACYjgMRy5g1ivAoGLEaEJACDDD0HAISuYsUqYGcJAqQUqwnmblAENgpCgWIHZRSvKM5qycbsDkGmqAzHPBBisNpuDCIuEGAyFuDqhkBApquJQrQKSKoqjKsliqoBguYI6sMFimRvFKpAKlpoBMPkzGqoBE98xgJoSIflEDLgSqJJoqyLYUZyDGJ8UigkWrBOJCwWpO5SZO5K4UZsYwzKfs8EYHLMKgEDJgymaonq4khJ5uxcruCmkiiEw+UQboJBYmqgUga83qIpDuZ8nmQCJAoswgbtTAPlDmpFJ0imQkiqZpluYhAqAiQO7UYnT0ALACiCaIesViyHZUZoCjISKEhHMIQGvoxK7OAbZOZKcCAWKrCcJqEWciAMMsWCwLLU5mRKQjyaaCAOcQ7k5ooBMk6BZspkwsAykOLsg8jm6MvwjigGYIgvBSZnQNhuheLoRmhOYLKURGxSrozuQ6xQo+SQImwM6+FASj7UoqyOJnTeaAAKaUdk4kII+k7B5oJEqkRyTChiZ0mqhCMEwuUGwMKu0bIm2GRiWHICBixGyO4mnWQkSqZlivQAj3AAVuikC7DQroTmjGwgj2h0F2jgDymGwLAOgPRHBW5KpKJEKCIWMAJKZPbIJmSJr8GCZgBALwjGBmDCL+GCgCRSLuq/ygAuBeLsDGZEMI4HZc9kYEOsHGgMMpCqKJboYEflBibB4uCkRsSoo8EkJsTCQqhSrEhvSKJkZ0hmESuJasBAamJMYkVAKjLIcwzqDDgcqKZAAuyeJiQIIrkeaGaGSAXipKMJwuxaKiweaGhe8I5iLBpgLFbkqgqkwqYmIRb4xsRoQuQBBrQQaCYOYO9iUSYE4u6o2zpQqsiGCTLEp4TuECphEzxMQnwGCqXO8IpAomoUqnKVJrDeagQiJghIPowgcowiZEImxUIyTqkKbmIQQjyWoHASrK4PYSJMKKpO6aMBJqJQyDIIh3CMJulKSvHWZCyHZAWioOMI4qwSAD6j/4wAIU5yiSbAYkJAQSvM6GcMbEp6jCAAPtAqAKqKKNI2ySKqELKGhECPaUZnIQamoUMATEiu4MLnRcMoSi4WoQBrxFBkYCoSpYYyiKQnyUZ6GGpEImRKYL5UKGYKZIJuDuHKukQEKiiLaMhvTORjRKhnwETC5MBrwSJiRHISSGVGpjQYKmDCgyna6ADDaFDmwScIoigSYnkW5IALKQ7kIkIMeMdI5irExjQmUihgo+zSLmEHIGzH5QYjQWaCAABCoSpmTDEO4i1PCLDWKuyWRnyMA21WpiTLJgFHJIKExviSAniWpCSLKQ6mJApEtEsBKgLAgiArSQq5CqQgQqYAqX9MgAhuySIukIBrRFJyDIKyCiIihOAjxQhjZQamoQAK9gTL9MxjbNoqROpMKiFLJCUD5QomxWbAgqYMwC/U4DLMpoByVCwI64RgImZAQoFnCOBy2Cjj5EiqwYZuRiBO7QojjOEPeMZCYK5cJqFLKCFLLgkjASaIYnDOoHETKOIKpYLkQiJRLkcFpmbJJoIuGGZg40AkaoZBAiaSo
cMg6mJMMo2isAZMb4UOuAQMZikLKmycqnhcamSSKnCebAhmAiSKZoSEr8GILxTqhGoiDEB+0CBySAJgpsqlDPPEQi4CoSKFrxTibhYopwCKLglirmDLLFCrEGwJhrAAC6xEx5n/y0AGqIBPtIxnIMRi6QoipMAD4RBrBWLAJgcE4KvJICrAiqgqGuTEc4hsYuROaAXjyKAnCGUngERioMCn4KICjT6KIETC5KpDSOqkz/kKBq2WamUPLQqggCpGEL7QIiye8E4qJAoIdBLk6gZmROanhYIuAgQyQkakzvTXAWrgFPKiGioIYmzLaEJEJGpHycLkQKNggAInIcsqIU80DMOlBoBiYEbAuFbkQIdpBmAmigh0BsnuYgQCrMOEpGgHoMKvCI50CgIBbspFb5CuDEQ0h2DmwgDjYBEiBGgDYEYoS+2SamUSsuGS7ExigGSHKkWCo0HGpgDHLKQLIOBjjOxrgAmAAkCG+gZUKHBTKGhHpGAONkjGoqkO+kUiyEEuyyVG5GpKXCTiEHIoV2yAR+0OIyVK4mHK7M5komoMQrweZCjXME4qJApEthLA5mKApqBrjMx8CoI0hqqBSyzLgOonRWJuDEAgDPaKdEquQSsSAQwCrMaTLQBL/MgDLRKmJU8sxkRmKA5gPB5gJBawjmwiRgx4TqFmQmSCpC8QRG4GEjSigu1bLEaQdIcAgibECIOh4mYIqoQgJkMJqA6o8t4sYNd2BMfskmJlEuhKQGZoRgo+XiIoVmwIJipEBKwHheaCJEawQkJQ9A6mIWNgaFKshpiyApBwAkpkjmkQMwjinb/IQBJ4YlIkqlosRogA7w3jbFQnKJhuSQMghioGYHISgMED5UakpkKAYGeJZiYIZmQC5gyrKIsoNhLsUGCHRW6DATAHZMIQYCxTLGRatiQWYG4YKigUgrBU4yySJviYakimjKokR6SkC6DMawHC4OLqiKAriUAqgMZwaoZAaKPAojJKJmRc/AhgdsgArwgE4sWCMpIwCmAoh0TE50SyTmgFFvDKgnRC0Cma7JJgpiqQsmKVAG5RKoT2YsBEr9DAboyuQSukEGgmxMb+RkAmUTISQPMKBLLGSKYNQvySrAZgijMNDDpEQi8hEgLl2u4Ax2wBgyDChKJoDmA+GmRkUu2AycAvSOrkBkj31GBmxKJoIoIApCKOZS+GqIIcslBgb0wEq4AFBuGGqoTmxoGuTsXkhuS2XqoknmwAz3RAEvUSaAQAIioEYCtJgm4c7kimqgoEvxQkZkQkJCJiJBSuyIp8AsgyEmhGCHxGzLxGxKxUAiyKwLqIBD6MDHYICnqIRCahznKhCusFx2DGgGAqRgY+WCAkGjAIJi4IAH5SAK6OJCJqYqTc9ohCMCqIKCKFQwVqYwjwQ4SoECRoEmYyEAA+UgQwSg4+SAo2EMMwiGM0WGpEwsSiLArkqAfBoAblyqSmoojyA01qIojm7Cbm3OwKhGGrAmSjhGQKYWrIBXMY/4dAAMhqjeqiVOtA0r5KELIKAKsAkGshjm6lTvJFhyjKYGAuTiQ8WqCkGvDOqGZKgPYPBW4O5MKuMsoIvogELGdEckZA58UqJs2uQskiQgz6yqFixGyjQE3uRgUnREBipEjj7RKqieMFAsCCKkKMPBbgwAchxuSqRsDsA8loAsDGtgKixWZiSMY/BiRDISaWKKMQMOLMKI5IfgqEtkYMswAc6gAONohMIylWLuGK7kXDIQagRiqACjwSAGQWrJLoakbBqo6F6gJkxuwyyoUwh4TiMoogp4EqyGVnCGDrRETiyDYGhDoODGzjXGjmijYiFKrBznYAir5IgyVGRGIgSsAJgAYkY0TSbhFqiO5mQoFvWGSiiC4IavgOQDBLAPJqmHYMZjKc7iYMLCaU5gAErwplI6IJqoARJiZgqxY0jgJlzu4hTzQQpoSEIiqEJmsJQiIN6wzmskog8xQAspAqACKsRsywIxCwa0wsgsFq3GxmiiUnDiDGS
nGHYOMCTOaEDehi6GbSsRYGoc8qYUt0jGZgiKrAIkJrRQ5sEebJLmJCZK9cZEJKIiYiqoykJ0jCfwZoRqTynCS2ziyqmgCuXKwiiPMCSWaiDYAuyKqHbV5qIYrsAMu0UGZAhCQioGauiQoqnepMqi4K4SuQIKZEIGpiJ4BWLAKRNqZMdgIIcrC/hsANtkZIcgKNLkpI9soko6Bgp9EgYoAAp2EKIiVHJDCa9BRmCCDCakZuYwiIipXCwWaqTu0jyKBCgCBmbqbAHTpKILLGRK9IwCvU7iJQcArI7AaBMpZoZogAPp4ApiIIcoVHNNBm8JYmqVcoRgBAKkooLpSAMBysECgipoU20mDiAiCmoDbCmGwCjLwGxDaMYDLYZG6QbENI5ENA4iLhaspo9t5BgiJELBBi6Vg2oJKuYUtlBkRgJANgZkYgqN6xTiyipkx+SoECICACIj5GxGRu3KynBChiSLQCyfKGCHbMBO7ABWuEoCrEoEsJhOMkIFKwQU84iAM0TGOBQoSnAEhAILrKKCcIijJN7pDqLkahq0xApAZogmZ7SkRwRsU0AwRsCsDsFyliyizj0GRCxGiHgKaiiTZQTGyCR3SMTy2aZmSO9qFLaM5ARLJKZDaSICgeLIggsoaAusqFYgaozrwiIsDAYwCsa8CgI0SkAwWugmCnFCFjAIBvBSJuCmhMnCUiYnBMHumObkGHLgEDoMqAiGwK5DwOrADesJIgKAJkKgcBZggsh6im5oUAJgy2awg6Qoj60gEyxCAunCSmkGhCyS6mhmBmHSXiSi4MnvTSJmTHLiVLqM6EyLJCAj5KaGBcMFIgJiJAdkqBJhAuBgJ+xkQELlhsasw2Ygw+cv+JAAxsAoBoAlUsBsTyhgh6ppCgCpXqagSHJM3jZRIyoEp6jOaI0GRmxDYHYIZCieMBICbAICuQ4kAKLkRrLlYAJErBukKArpJwYly0RoCqhBjuApCySGsgYuDeoISCL0FPKKCXNBDnLFQugUKExGRmgHQHaEhHYYrg4CrEcCcMhGKFI2RjKmKM6ogRfgrg61B2BkUyBgAyUgEuykFiQC5AYyRcoCTmwmhNh2nOJmFC52FHZM6EyCxK4LOGBmze5I4g5upIfqMFZAIoinJqKpLpCEdpgoKpRoJ4ViAqAiJ0XCRmiABsjrbAjuQFjjYkCyAlmqwIz3jCAy0aqAgEAFaASQAiYG5K8AlHZVJoYkZmKCPMpA5oTDIupswso8jm9pBqAEZzjSJ6TC5CDa4myMYuKSPszAZFpAKu6V7oQYswjKc4ji5JAsjIIGrMewpmBMehiqDi5oowp8iAosTCLKPuViQqTuT2TuHKZAJmTP7jhKZUQKoKhLxGcqBKyOGKJDgOQumYKmTeaixW8AxqSITIOoguZgYIatXCwaKmRCQzDECmRIalJ6pSpSaqiTaCiWKhAqQQtqsMrpwEpgJEpAc+QqJIyM5AuMekIV7wBE6wpAuwkioIQIRyiCYjZF6qDKLF5iZGKHbKAOYIYkFussKJa2AMMiKNaolu4Ao0p8gLv8fAIo0A5uAIbiqzwkJVJI4kJjKcKg0jpNBivhAqwUbAwKRjAPKKtkkHJVJgagKAdibMgGIERuVm68wlIyRAq2ABBwEiQoj+J0gyVECmBlSsAmqvAlxoUKIAIgwzRQMoGOK0UC6FQsDQaCKEbor+UIbhyqDqAuBwI4BIpkCHYSarTACu4GIuq0XDASICgWZvRiweQOQGkOTjbjLCWCSMQiwMintFA2xYonBSLsXioIwogqQqTnqIUyWKwSKqCConRIhqQQeg5qtMIIMuBK5rQQslAAaFKCvioFLFpkIQJQJu8oKQBaIEIkDQZumTMkiKfkojAcKAjihCKm5Q52BekwBJACbNKmQCoO9ICCQMMlhsJ6JJaqgOJnZKJoVmBkVAs6aAZpFgIkpF4mgy5pAEiCBiQNwwBWNszgJ9DrYQ6ojKJMZutBrwCAbhxoEiYgKkJwhCI
IRmST5jIkVmomCGvuBG4WYSROSv4uCKRORmngHCKnKi1KCGBGZBEGMlizAIjr6IIyHCgMokpgK6DC7hWuySYIJqImgDgMpkZFroprKK5UamYOanrF4AcpSALCbngM5oyjpYIKonKgaQxQao0uYhWnrEhqyFA7iSaEogTGIoAiflRvBUqpDiRC4KuAoqCEAqkOwGewgCaERDNQquzSoHBcKCMopgCMKFKxyAunq/iEAEIwRMpmUOqCFUewRKbgWDNJJoSiRIAGJkI+GDKFBuTKAGMErwSm5SAKrQZGJ3RCAkYE75RrIQaAOBYkAqRoBECGEu3Az+Qy4SQCUOZGgeAilDZGBWdEhrRSZIokRgBjwabgoCoUaAokRyiG7BJ0hkQkQuLqfBQmYCAKfgZtT6ECRiYC7IQAQBpoAdaGsiJwhU6kTiByHWNkBGZgUHPE4oSiBGBGgIq+GDLFCqSKYGMI60EjIGACgGKGZ6SkCgJoI+xCtIRSsNcoAqJ4jCQIYqDlnyQqxKTCHCoG4cRi0HICze6CDnwSJAoARADrwaLkBS6IqA4uEi6MspY4ShwAgAJkAAaq4OwGUO/wQmbtiApwlqwCgvwUYoBKKMDfrCZCIaKMKAKBDSvM7EMBQicQegwmBEBGQMM00nZJKskqBiJEKokviijTJACi4oR+REArgGIjJOCLJY6kIuL1CkAAkyhg3uYyQC2CFixGwYAilHADCaqACjwMJAQiBAjrwcKqBOpMbBAqAuRMNpY4iooogsIgLoaAN+Qgw+BsjqGGiHKDZOoWJMYCqRYDdAAsQJ4sBgkqgBY6IgzyyJK8imBEZkTId5Fi4gRqEigOIgbg5yRWvEwCpIAjKEJoCoK6ZIbDfIwgpBwyA0CwEmSABuEAD3giKESaKCAM7wRaOkJj/IADCQQrEHAMJkRAYpjnqJAyhWaAYAIEIyiEpxQwikZwggMkYCieZrBEp+RwCgCijK5jtWZKZICKoVLCukIkhhZhJlCFI8QkbszhLtywCCIIZmYVL+iKKolihKJABCNwQKNILQ6iZMJHpCSoGqI6BENwYAgAppDvK+nCQCSiCCDPpq6CLUgSaSAISYvqAKdAQWeM6kRGAiSihGK8VG7FhoCCIGLCdgVixDCOhCgEQzZg5JNiMkBHsGRGTa7JKuupwkIgoghgy+JsIm1MRyUKAATTcmEjACGHROoEQkYgpoQCvBwqhUbgiCJigjgFA2CoTmIgAgNuaOifKCIkjvbhQ/xcAJZkinBLcDBapiSKBMIS8KvlIALEsEgJwoYqLhQsBhp0SMYEBmgQK8Dmooj4EIIghsq+pUZGtQBK4GhGDr6pxkamaU8gKgDO7M5uD+g4WuhghsTmUuinZYBK4jFKgUrgICgCSebYcgSGAADDIMJwFDOFJgRGJMtGNkDHQizEUuhgQoPqMMpOtixaZqyAhsCEqw/APBqkJIQAZkZwh6zEk6AkhkUS8ARmRAXHhKpEhAokgGJcOkxjLEymEiEnBG6hVygkhoIkZsgv7UILLiUDhGakhiRM44a06N7uJIwiIJrwI2VERwhwQk2iYsCugBXrBGYgioCIYwSOfljrZIgAiAAIYiQFByYA7hqsJIriKjDSYvWCErKAwsLCdMAeJiRPJn0SoCpEhGIOeGdA5I7KsaZNQsKkpkIJ7pRugUQKIAAkGj4MauEKYAIhioZqtNbiqQKOriiWRzBk1qqmLArrZLAaAikOh70CSmgABCELZCJiLRQC7EBRKlomLk0kbF46BECGpIaBw2UGbghmCkCAKB4ygOpScEAKZiI4Fi8AoI9nbWZO4jREDigCEvAwnqRmRKRKUnKiYYiO4rVGQNJm7MRujdegYAAIQiDStBAyCCIuEOMkUGbKeACHqIBjQiiChoq8EEKyoQ8uhDiSZkEDIunLDCgAQCCPICuIsMyMAGADomUGZJouxKBmxVboYgCFBoCadlDuyGJohg4oTcPMsgBKdgAGpqQBR+QghgM
kcgqnbOxeKAIionzW5GZAhIBPrGcsxRai5erFzmJApiaJ40hiIAhCQULgTjpUpySEoqABKtAyhYbCaMLGbrCATyZ1Egq0JBK3IWhKoCQmjHRapGKhQE8msEQE6FwGdgjhA5BuAIyi6F4wCqWGgGIFZ8kmgABqSGRCagY0QkSi7B5rrIYqRnUIAwAsT+7k7hSG7CJAMR6kAqDgEAt8QgABBg5yhSTK1j4AUGaglnQIJKJQro2zTSqEIGpQKEskRi6IpMeoRmL0SDNAYEICBnwD5/xMAysxSkIELgelwkCiYiCAS6wqSCmMjqCnZYBPbiFCBADy2S6EJQ7kxqTWuJIuyYMg4kRyhGqk0kZsj+0rQCIuEGoEBuKywnSKaNTjSq0jyK5OKQDWwi6G9QIEiKbVwArKMqIJbkCSeMgJZoiqTHuNasAA5kigg6DjJmgEZ4EmijKIdsCCcJAiY+yn4KZAogyqxSMOPA6oJRZiJgZkoo0oABx0DEQyzKDqHS7BJkywWmDK8FY4DioACKACDj5GriCC4Gzb7MLqwLaIIOKSti8GNgTkUKcEcFc0woItCFLkZoAwECrhUihUpkRiZiEeZA58jqmKCKrN58TCogCiQrAAYAICTHaGKCQLaSZOsAZ3BWsARKMEKm/gJiBIiEPpJo54im6l0kJkQqREguEmUS4MzjpQKKIc54EiQCkSwMLAXnQSJiBGKEZENhIyIIMAcA8shyoCMsgsQooxD+gkRqpQe0TqErEOgmWSAyRiIkAiAIYU8BAQNozqallnAOIAQM6hwsCGPBAuYFQuCgQuUyxqigI4UykDYAIqxGYGYuWLqiDHYIovgOoW7UYCrVBHbEJiAAAkQhhsTFoyiQKqFWcApgiACgTjhEZ+HCYgjqjGgCwH7KpGDjgO6IcyROvA5gJiaiaDPMYkWm5BgoZ1CqIxDgbs4sFmRMAizLFOkDZf/FQAKAihElJwTOLMKGAcZjDepmUSoCQEJgryNFLmZMekInZMr8iqRsAyQib1BEQKrq3iVnjKgjUKTrBCQCxUgkRiAc7EMkBAqF1LAGBkFmZhRkZB5siicBwqBiSGouikEzSiQCOobgRHNQQjJGoj4GwEyoIkQN7wKNfspJKmaMagYEjGgmXMGyhiJkXEAhBqRQISMiWOAmiQJyHGgGIAJAqqMIKCfILkhz4AxsAwxks6KgMwpQxOoijXIzkCwinICqBmBmbANgSFbBiKKwCgK40qhGBNykppSmMMekwkaBQmBCQK4rgGBq5mIAd2qcaKdIQL7Com4C0USoYwDAN8I/xwAE7soVZGaEZCKsBsBAnM0pAqZwnm5JKsiIUS4Ogc78FipgUqRAAiAgboKAJG8CAOpv6ph4AoRg76ICdsIcgGYGoGyzyLJKlQCuiiRmrkbEzBkNIWLsAAt0lG5ISFRkgo0mvNqsIA7hhoQiBCwjYAgywkTC/GLCIHcOSP5CxjRjBAUqIlBk61Js54wF5mLIYGbCQQIijdCygA6uBNssw0yCAWbQ9kSG4cLiDO5OYAIoI+EKbkAkL+Auwoyph0Dkq+pgJ6QYoGIGAKon4KsKEWBuzAhuIuIGso3csEYGpQYGoebMYFTyUGgMb0mi6BAkRqCGbGNAijZEIC/AIrLzv8RADHTOQGxvp2hmxklMrpRgukcoo4gNKAMgTChC+ApmSdCypJKohE7tQ4iKAacMomUDoUKoDGYIKgYow+CiAmpgZ+B2hoSsR0RpK2Mko2ZQyHZSBPaCgK8KieQigEwlJygKqA1RMkRHIUIKrMNJBkHmyOoA48VC6BQoBChC6QsoxoCuoGfkr2cJKkIOoebqwKeqXKBuEkj6BqBuRsmAskJQIOqmoILdCO4iIlSwGmhKgJwAwwCGcArFQmZc6AZkirwCYGJEqmgjqKum1LIiEqFq6oAmp00AtswNegakco4MiTbCTgGi6kwmGETCLANIZByqBCAeAYLginAOwWImYcAEgAEmyMJ0Y4BCR
CpiLuEn7k6o8waF7q6ILi9UxDoGTKljAK6KVMh4JsYNqCZCAIqFxnBClCDY7mICDAXC5JiqAgzwJtBoSqCc9oJGJConoEY6ZqJCLqYM9z6KaK8WgKhjUIUvDG4mlAEkKuZc4SZCYkSc5CIigEUcaSbGANkuiIppioCgIsRg48FGbAZoJiKytoalcyqQcipSaS7nDTaikKByjonsA2DiogkANqJQBWpCoEQMhW5nDBDESe7CBVY0kqAIZkhCMhAgMhgmIMNAroKANqKOPKrKYH4DBKxnRmkjDCDCM9DgcwQqAlDFKmpOSawnQgAQQUQurF6skIQABUAikGAN7sziwUMkyi6EjKbhyvDHJINAKAMoCO8rSGAzSgi65wiuZomKPAqQsCMAaoQYwKpi0AFupsQAHOUAJmLFyu0GhmDV7oiCQQMkjC6iHKZA0uyCwCagb24GjPTj4mgnYkEyq0SEKwUGNmaVIDMIJqAJhDQiDqGkI2AAWChgRq5BzrTiCmSVaoSiQSLhCmpiDWKhyqgCwKboZmMmEaovAm5uhwUq704BM0ooAiuQzTbEpoYBBm7wGoiloqLlWGpsjisBkigAAkZF6shC4cagiCaCIRL4SC4OsMoiNIJm4SZn5Kai4IkvKpBwK4huACNNgKqCKEeBJqKqUNzHwAWAKkMk3mxkDrAFGnBIKkZJ7owmBQLlRCbASKNs3mwOoIampMLvxSaqgEp3hA44JogsKw4tAqcNxK6KNgaApKPqUNhkblaoCSLwCEMgze7AptxhJwDKbJ4kKAaApApoVTbIpmhK7CqXKObi4LqnQISzQCAjIiDj7JpI7BJuZpCEfuJKhYSqI2UWbAAK+AReLIZsBlF2DiYExuFiJwCEJqDeaMckwvYiIiYUaEaE8D/GAiZiDGgKFLhnAkSEJoVqihEs68JEzGgwDish0npACkABxuSmEikDJE4sEQawkgJskuYFqsiqRiZqawbR5oBAPuLIcjJOZQYKIW9iSEAoJL/DwAhykBUsY2aMjOJ4BkKhzDMkFAJBwqCqXiiC5EwsENA8TmAkSqJBYwRkIibiOBLBLkYAPCMgYCsIIKMAVLsKRCBAZuUnjEDrYpSMaEgjbFwqKIKC5VywiGpSYeMAQCgQJKICZMJmZE2qoIouvCLnBaZkFCwzCqAyRyECIwj0o+CEAihKMuRccGoKTOEONuSarADDqABXJYqkQo0uhiIFI0VCLkyisKIe5UbkAOauiCaIaOPBorMKJGdASmzOpDZCooXILmACxSErwgRYwGhnSCVK8GBDQFHqyKpWZUMgSixSIKwPJIBD6BUuhIIqJEMsQwkujgm25wxuapKFKuk/xMAAYTfACGJoVG4ATHB3AgxgwAMhFioEom/ImmzS5OMJroRqhSLBDGrExrAmHjiKICAqYi6KBWrEVX7iRigqkkTuSgV/RkBCIA4ohgYlOuOAFOAwEiIlCyB6TmxNrtDuEmzHLJrwUCIgjrBEArJVKsCKaGZmIqAF50kgK2IMPkZEAGoSLHLGxSICRaKsHLBuyghhSnBGUOpIsycA3i0OgK6R8oRmgSLFQmYMpmpsm2zO5ARuC3EHBGiDDS4vTiirIlDkQwzst8ZBAqAIayAVLicEFGhIAnpQoqjiw2FWaEgCKpH2yGJAhyEKZoUiqkCebFKkomoKuiIULA4Frq6vP8VAAO7vFISy0MB+4syqBMZwIhgkfscgiM5kIUeowia6jEZBSkh6GLAGagyuyUAmSIAyppiowsDgLivgIskqVg16ZqJoqu7VQGKI4neGQMLhAoCqXKo6wkxBQg5s3q4ApvOQogDOoSJY+kYiYIYAoEaJrmQjTTIMYmijpi4aYGacgLrChC5ijgFCTCR+Y4SoDC6FguABK2oUBIBCaF6sQCduUIphikRmXPgGqExrDQYqmOIqhgqhg0TiZAKoNxYoxwhE72ME7uLNACJJQDfCBKpgFGhyFGQ6SkgA4gZsUKMwZguo3SaAg
gJg64SCpdbgYAckwiLsnmhOJAYypgIrf3/FQCkC2EEywsCuZ4TEps0ks8IQboBQZiqc5DoCRETiAqRJo6SgA6BQrkSAosVzDGalUySgRyDiIyyeKEaAoCL0AmbJokJN7isILDMORWYKhXImyCSi0EGqggUya0oIwIKFUHaGRHOEDCRKiWZItoqgCKNM4OPEpAb+FiAAACJoLusHCeaKCeovSKJ3CAxyDEw+Zk4oQApgwEZIv6LKRMGG6RhqaIKuwJooTiGGhXaGIADjwMAmjOZqZFpw0mRkIm6yho16UlDwZwZocs5JKlRAti6SMI4mQMgige6rikkgQojJpyRCK+QYpkCEhmFyziIph2TAhyTgIyRcqgYAZFMABQAmgn6WJCIQBXKCoC4nUIBmlKC24skugFYsxsSBL6qQTLBIXnDG4DQGimHGiKxWbKeAELZICGxKhPqCQoVmjIRyImb8CuSmGND2IsQua1IE6lAI9qsMKKZOBeKiDXbujkUgRlxsxuYwow6hxskkFmynJhzuSAhoBAg6pg6o0EQI/mKmLwQGKBzJ8oJCMicIQGRaJOpnQGAiBA0ukMh+48JAiKAOAeamKGPiCQLFQA4xI2RIMgwI8AgIOiaOIQMQwDImwHNIIGYcwPqCgHJq2KBmDES/BoSqRFZoQlJtKyaEjMhsmSomAjMq3LDSIKJU8mICweLEhGZAELcAFqyLAAYAAMgobyKyByEKkEnmZwBqc0oIqBAMtGdEKEKgBWKISbLuyAQIoF5kyqouK85hikEiVPQiokFiiETqBonrKlIlCsSIuqpmcoxkgpmE+uJAcqbMCSpYhHInBGwKhCECTIXzJkIIAOZZKGICNiaSpRZkykl+ogIg4skA5siAs+YUKBIAQK7urqfI5gQZ5G6KrDKDBMDGRURvZsCC5NxkZF4sZyKAEEEChSIqgCqzVOAQpFIlp2ASLE5I6IbNPCMiDPLYgGAqoDbiyTLIDaivBGwzSgRgBA0sMwJsRsSRJAgNfmciQlDkVACgasI+YyCSCOIVbiIjAWrIhKwOCT6mggAEwBEqCAigs0KyKo4hBlFEtmKoNsMEyGYQhL6moGbA2iEiUGxvZoYIQQ4F4CYgMm9EBglAnGjjLBQ0BgRggkxwp+AI5s1EYHMi8qKGARQJBHLqKzMGBESMSOZy5zaKwYgqEIS+ZsJgWIAIzDriRngiSgTcxGCrIiZAo0lCDGTm85BoSkVEbi9qa0AEAhXEriamvubIRKAQzL5iwC6IDgHCjCjrqoSSYNiIdGaCZOvASAxQkXIickxujIgQ4RY6IgKkyo1kRj7iqqYiUBGgom6mt6KEhEQIhLqqpsIETAlIiHq26oKZ4gRE7q8UMiJGFMiIYOt2TukKiIiRKiRjrksAA4AklklAIC72a4ACIhUE6ipm/u8MIFBESGwrwi5CRA3oDkXu6kJGAFyIpScCgObzDExJCWpCtlKoEgSQQMK0JmOAio0FDH4icuJuRo2EgKYya7aCgEhEDEGu8mImwRAAYFxsLqpjENEAQKMmBCA7SAhNASsiRAKI1IRMQG434oAIVNEEsgK+5uZCDFjBAigy7ydmSEhIxSA2piemDEYhDABFKytqngDIyDAmiCR3SokMiW6oByxIEMjNQDBu78gCUREgpCc6KuZmXASIpG6zKvbCRAiJEPIyxnIEDADAFkHqbnLMDQSY6CbMAH6DRAjQ5GMgJwyiTQIJ5iqmtoZGQAHAEJFERLpu7yKgAk2MoOa6r2rizIhNhKoy5rKDDJCghom0KuYmHETNLCrEIDoiyJDRpCJiKoBUAMTUqK7vatCUTUhIsDbzooAiTJzgpiZ28uaECFEA4m4rLqrQUIzoTkn+4uAGUQSIsiaCRLbC0QxNqGKyYogQiRCEqC7zosiM0cRIsnavYmYGDQ0AZq6zqyJESE1AaC7qs2IISQBGTb4mpCZQhI0kZkImcC/IDJHkQkByhkBQgFEkom4v4oSQjRCg7
25vaubUgJDBLi9mswYgDOAUqK/CgCAGTeRiSToi4AIMTI3yAoSkdwZEignmIiYmUChUBNFuBu4zIpDIjoABgAkIai+zLqrSBQhQ5HdmpibiFIDKBL8mwABgGITmDHBrpkZI0NEkZsRkNyMM5hlkooAqQkDCkYzk58BurswJFMjQ8Ctu8uqSSUQEpHcm6mqmVSCIJHcmgARCHORCiPJqqBQFSKTrQgC2ykjAmUDmpmqmjaQUjQEu5qq+xkzUiNDwLu8ras5NCEkAdycmaqJQ4IYFPqMABGZYoKbIbgMACk3ICTbmSC4HEMBUhaoqgm4SoNoNDPQicqZu0AhNhECifi9iggSEFED27u6vCkSBCEl2q0YEJhIFYAAuZqYnXMREriZAMoKIiNyJaCamLk6BEg1RZCMqKqqEDFHEQE5AAcACPqtCAABGCSgvKuqrhAyEhgm+4wRAAgwFKgAsK2aKlcBEJGauJwwgnIzFaiKypwoEihHJJCciMmrKSYSIDPou5rbDEIREDCy74mYiRA0AgmhvbqKIRBEMjGQzwCKQREhFamAzAkSYgJDg6rarCiCOmcSgauAy8sYUxISJNiaurydQgEBQJLdm5CaIEMUiIHNqpkQEEIzMSP7HJAYITMkulLZqyEiBHETmai8qySgciUiuQnJ24lBEzBDk5+5uqwwFBFAAtytkJoAQjOBAc2rqhoBNDQRIOkMkQtBQxHAUbC8KCGEODeIqJnMQKhBFEKigMyJmoAwRwCBisC+6P8KAAkCCDQ06qqJ+gohERFCg86JmImJUxMIIaGdyighIiQJBKyKARklUQMYsduNBJlBMySooKzKIKBRJhCZCtrMiRIAMjbYuovomyExIzEXy6wImZhRJJAQAsuaKhQBUgEI25kACiUyFgiRzKpSuVAjJJCYysoakklFAoCa2cyaEQFDU6GtqrquEREkIRXpqoiagCAmgCACy6oKJYAhhBDpCQCqYiIUiTLMqlqiG0UzkRrQmawgAXEDIJjaq8wZAUEiE67JqvuIAQgkIsi6iqqqHCWBIUHgChAEGREzAPmKmBAzGUeBAbvILKMrJjITEPgKuggAchQQqQD7uhkBSAAABQBCsYq82KwQkUgiBMyJmpgIETIyBAG9AFkDCEACkK6ZEBASYhQYoAzbQbk4JFOAAKvZiogjRgEAmqC/qhCQJHGjisuQrIkJQQQh2AroiRCSGSEmoauAMCMhU6CIqqkJCEUhgguwnZiZciETGcGq2xmIcxQBmYj6qggAKCQXmqiaygsAGjUjkK8BrAAgkCkkA8wJORIgMAYakqyIEQBCQwKRIPsQyQAIFCGgMJKtEMFQgRCIybuAzCFDE8oAy6qsGgI4BoCpCdsZgQAIIgKpqiEAUxKYgAG8GhJCE1IzkKCfkouCQCMyoBHaiRGaJDmFmsmJ6oohQ6INErrbmhHq/wAACySTvYmQmJkQEaEJE6oAmWKCKKAKoK0xAppUEQSME5wDCwMpJ4qQCqAo2HGCGLkpyLo5BI5DoAsCqskMASmEGcELATCTGhWZkQyyrjISMZIbo54IgBkic6IqohyRCwBUkQiwEMkRqmSIIbkRrao4oSsWmJoRoKmdAAEQAJCamhEBIAAgAAC7iAkiIyISqRC8AAkBQSMCm4AMkq0iIAeJkQqoiAlikkC5EK2pSbIMRaiZGJDKixEikSCwDKhJkSCQCBGZqQ0SMiWyS5Sbq0GgCUURqCC6mpkIADFUk44juwmKiTGhKpRh6AkJAokACTK5qAuhGYFBuTCQIq5EIQAJAAmAmTqTGgQIQ5Cxj5EKQQMKQaGOkQuiGzI3yCmTDMEaASDKYrkgrKIslAg4koidoTraM4oBEQmQAAoGGQIAmgGMExAiIgQKop8BoDkVmCigicwqAwyCZbALBLsJAIAyuSmREM0AKZQICYKbkNspoDUJshyooAkJFzqzKJuUHBIBURCEjgKpKgaKMcI700DpGROcAhqTq1TQGQHrII
CBq2TIGRCRrVjACQGBCzHACoCQuzEAF4qBgh+ECYigKSEFm1qGiVHwOMkiiwWqMMIouROOEoCaUMKMAQHLMAPMQJGrCAGsI6gAmZAMAqgpgAgxwDComEKYFakQkiqBQRMAAgCZdLA5hJ2EHLNaooskqoIr4ioDiZlE6gohsKphkYsTutspyIgiyQAJoZsRmYEqI7iZDMNrEoAICRIQkQo0CDSVDqRLwEG4KQULoTHrIQCKGhetARCoiyS5kDHKukHaGJIaoSC4CRAAoCARoAoA2UECkgoAkoqYnBQ6J5ActDrYMckZBQqRILgghQqQJL4gEaqoZKoYAMuoUMBAAACpELgJmQEZIxKoGAGcECKCCCGRGpIQmJkRKYUgjKQssSGqmhWLlTnYIAKbiGPZGACIiVLJEAC6qyjQQACBmiDZiQgQAVKCiJqAuhoiBQohkSyiIKkglB2WG4AokTsHmoEDAAMAAYwzmQAYoZsA2SgAIZAgkQzIC6kymRIQApmamoENEgGZCQIMIhKZEYKbEAAaE0G6JZqhKBGgUrgRCsEpEgAAIdAqoZoQEQIIAqqYrQAaBAAAEAGZmgCQCiMAoBmRKYIhmSKRmgGIijIAAiiCC4KakRoDChKBCQAAmzK5ABiyDQGaGAEimiKpqrmbChIRATGYiKmZqAkiAQgAmQAhAggykJoAqAlBkCIhlSyxKbAakhuFCZEgAakQkKoZkZoQEgEJA4yQiqkZEQgRAYCYqgCZIBESkBCYgAkAESOQCYiZCAkEPMNIyBGoEKJKshnJM7oJEYKcE6mpGQGqQKAQAgAAAAmQChGwCRCRGAARgAgBiKkQmBAAAoAgkBCYAIgBCJgTCpUpoAAIqREJoEigkSqECwAhwBiCmpkAgJkhqRCAiJkQkBoAEIiZCAEIIqkAiJgJAQAREQEQAIgJkimxYqojjAKbMskznKIMhImAIKAaAqmZCICKEZAQkAiQIKgKEACYCQEAEREBmhGpEAEQERGACJEJABARmgQdozmRihGaAQmRGRIAAAC6GYGpCCGCiQCYqoqYGBEAARCYAJuAmRAigAmCmQkSmQgBCAGYEQEBEJgRCqQpgQgAgIEZoQgQAYgAkAgAmQkAkAkQEZAYoAmoAAkRACCQCJCaCYAJI/3/AACZEKAgAACAEIEIgIgBAACCPMQ5wSCgIJEakgmYEBEACICaCJiKEBGCiQCYqQqQEIIJAQgAmZkAkBgCiRCQEAGBCYCYAICIEYAAkBihKbAxmJAIiZAZgQAAAQmQCgGZAAACmYCaiJgJABKYCIiYmZoAIAKIGJGJmJggkBABEJEJgYkRiBGJgUC5QqkACBCRKZAwkIgAiQGZgBiAiQAJEZmAiJmZCAARAYEJqIiaiQgSEQABmJiZCAEYABCBCQGYIZkCi4E4yDGomAkIoBkBIAIJAqkIqAoikSmSCgGpGYGJGAEQAQCQigARABGZAACRGAEhqQAJkJgIgCCYAIn9/wAAihOLApmQiRGpIQARgBCQihCpIBGRIJEJkIoIgBkCABGAAJmAEIAIgAmAEAAhEKIKgAgIgBAAAgmYkRySEJCIEKgAgJkQEACYAJgAiAkBAAABiACZCACACAAAAIAAAQCAiACAAIAAABAAEBCSCgAACIEZAAIJmIGLARABACCgCYCZABCBiQiYAJkAAQAAAQCAqSmQEAABAJiZiBgBARABAIiZABARAACAiYAIAQACmYEJkRkRAACImImYGQEQkYkAmYkAEAABiQCAmYAACAAQAICJCQABAAAAAAAAEAEAgIiIiAAAEZAAiACIEAABCICZiIkQAAGYCJiIiIgAAgAAAACJCICJAIAIAAAAgJgAAAAAAAAAAAAAAAEAgAAAAAAAAAAAAAABCAEIkAgBAAAAgAmICIEIgIiImIiICAAAiAiAiYgACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA== '
𝚖𝚞𝚜𝚒𝚌 = 𝚋𝚊𝚜𝚎𝟼𝟺.𝚋𝟼𝟺𝚍𝚎𝚌𝚘𝚍𝚎(𝚖𝚞𝚜𝚒𝚌.strip()) # CC-BY 3.0 freesound.org/people/elektroproleter/sounds/157569/
with tempfile.NamedTemporaryFile(delete=False) as f: f.write(𝚖𝚞𝚜𝚒𝚌)
from rlbot.agents.base_agent import BaseAgent
for outer in frames:
agent = outer.frame.f_locals.get('self', None)
if not isinstance(agent, BaseAgent): continue
def get_state(p):
nonlocal jmp
j = p.game_cars[agent.index].𝚍𝚘𝚞𝚋𝚕𝚎_𝚓𝚞𝚖𝚙𝚎𝚍
if jmp != j and time.time() > 1554043427:
jmp = j # If you are going to use sound, at least do it tastefully and put some effort in.
if jmp: 𝚠𝚒𝚗𝚜𝚘𝚞𝚗𝚍.𝙿𝚕𝚊𝚢𝚂𝚘𝚞𝚗𝚍(f.name, buffer + bitrate*len(𝚖𝚞𝚜𝚒𝚌))
return orig(p)
agent.get_output, orig, jmp, bitrate, buffer = get_state, agent.get_output, False, 5, 10453
did_you_have_fun_yet = True # no performance concern :)
break
return self(selfie)
return property(fun)
def flatten(self) -> Vector2:
    """Project this vector onto the XY plane, discarding the z component."""
    px, py = self.x, self.y
    return Vector2(px, py)
@proparty  # NOTE(review): `proparty` is a custom descriptor defined earlier in this file — presumably property-like; confirm its semantics.
def length(self) -> float:
    """Return the Euclidean (L2) norm of this 3d vector."""
    return math.sqrt(self.x**2 + self.y**2 + self.z**2)
@property
def size(self) -> float:
    # Alias for `length` (Euclidean norm); assumes the `proparty`-decorated
    # `length` behaves like a read-only property — TODO confirm.
    return self.length
def normalize(self):
    """Scale this vector to unit length in place.

    NOTE(review): ``self /= ...`` only rebinds the local name unless the
    class defines an in-place division that mutates the instance — confirm
    ``__itruediv__`` mutates, otherwise this method has no visible effect.
    """
    self /= self.size
@property
def normalized(self) -> "Vector3":
    """Return a normalized (length 1) copy of this vector."""
    return self / self.size
# Easter-egg block: `life` is an int subclass (so `life()` is 0) with a
# falsy `math` class attribute; the assertion is the "love <3" pun and
# always holds because 0 < 3.
class life(int):
    math = False
love = life()
assert love <3
| 179.829596
| 32,751
| 0.90215
| 1,226
| 40,102
| 29.386623
| 0.344209
| 0.00458
| 0.002332
| 0.002221
| 0.056179
| 0.05146
| 0.04985
| 0.048518
| 0.046131
| 0.038415
| 0
| 0.059509
| 0.071418
| 40,102
| 222
| 32,752
| 180.63964
| 0.907997
| 0.014737
| 0
| 0.372093
| 0
| 0.005814
| 0.840123
| 0.8144
| 0
| 1
| 0
| 0
| 0.005814
| 1
| 0.232558
| false
| 0.005814
| 0.046512
| 0.151163
| 0.540698
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 8
|
d570b2ed9d88b7f8ecd775fdc0f1c4bfc6d69fe6
| 2,514
|
py
|
Python
|
doc/manual/include/approximation/anidiff_tests_potential.py
|
NegriLuca/pigasus
|
d5057b771f81cfa05bb08ea4b0fd99088150cd7a
|
[
"MIT"
] | 1
|
2021-10-21T17:15:26.000Z
|
2021-10-21T17:15:26.000Z
|
doc/manual/include/approximation/anidiff_tests_potential.py
|
NegriLuca/pigasus
|
d5057b771f81cfa05bb08ea4b0fd99088150cd7a
|
[
"MIT"
] | null | null | null |
doc/manual/include/approximation/anidiff_tests_potential.py
|
NegriLuca/pigasus
|
d5057b771f81cfa05bb08ea4b0fd99088150cd7a
|
[
"MIT"
] | null | null | null |
import numpy as np
import pylab as pl
from numpy import pi, sin, cos
# ***************************************************
#
# TEST 102
#
# ***************************************************
eps = 1.e-1    # amplitude of the oscillatory perturbation
m = 2 ; n = 1  # mode numbers in x and y
# f: smooth profile in y; df: its exact y-derivative (chain rule on 4*y*(1-y)).
f = lambda y : sin( n*pi * ( 4*y*(1-y) ) )
df = lambda y : (-4*pi*n*y + 4*pi*n*(-y + 1))*cos(4*pi*n*y*(-y + 1))
# u: f plus an eps-small perturbation oscillating in x.  The lambda reads
# `eps` at call time, so rebinding `eps = 0.` below removes the perturbation.
u = lambda x,y : eps*sin(n*pi*y)*cos(m*pi*x) + f(y)
t = np.linspace(0.,1.,200)
x,y = np.meshgrid(t,t)
# ...
# plot of f
# ...
pl.plot(t, f(t))
# Titles are raw strings: "\s", "\p", "\e" are invalid escape sequences in
# plain literals (SyntaxWarning today, a syntax error in future Python).
pl.title(r"plot of $f(y) = \sin( n\pi ( 4 y (1-y) ) )$ ")
pl.show()
# ...
# ...
# plot of df
# ...
pl.plot(t, df(t))
# Parentheses of the displayed formula balanced (the original title was
# missing the opening "(" of the prefactor and the closing ")" of \cos).
pl.title(r"plot of $df(y) = (-4 \pi n y + 4 \pi n (-y + 1)) \cos(4 \pi n y (-y + 1)) $ ")
pl.show()
# ...
# ...
# plot of u
# ...
pl.contourf(x,y,u(x,y))
pl.colorbar()
pl.title(r"plot of $u (x,y) = \epsilon \sin(n \pi y) \cos(m \pi x) + \sin( n \pi ( 4 y (1-y) ) )$ ")
pl.show()
# ...
# ...
# plot of u - limit
# ...
eps = 0.  # eps -> 0 limit: perturbation removed
pl.contourf(x,y,u(x,y))
pl.colorbar()
pl.title(r"plot of $u (x,y) = \sin( n \pi ( 4 y (1-y) ) )$ ")
pl.show()
# ...
# ***************************************************
# ***************************************************
#
# TEST 101
#
# ***************************************************
eps = 1.e-1    # amplitude of the oscillatory perturbation
m = 2 ; n = 1  # mode numbers in x and y
# The lambda reads `eps` at call time, so rebinding `eps = 0.` below
# yields the unperturbed limit.
u = lambda x,y : sin(n*pi*y) + eps * cos(m*pi*x) * sin(n*pi*y)
t = np.linspace(0.,1.,201)
x,y = np.meshgrid(t,t)
# ...
# plot of u
# ...
pl.contourf(x,y,u(x,y))
pl.colorbar()
# Raw strings: "\s", "\e" are invalid escapes in plain literals
# (SyntaxWarning today, an error in future Python versions).
pl.title(r"plot of $u (x,y) = \sin(n \pi y) ( 1 + \epsilon \cos(m \pi x) )$ ")
pl.show()
# ...
# ...
# plot of u limit
# ...
eps = 0.  # eps -> 0 limit
pl.contourf(x,y,u(x,y))
pl.colorbar()
pl.title(r"plot of $u (x,y) = \sin(n \pi y) $ ")
pl.show()
# ...
# ***************************************************
# ***************************************************
#
# TEST 110
#
# ***************************************************
eps = 1.e-4  # amplitude of the oscillatory perturbation
alpha = 2.0  # strength of the x-dependent phase distortion
m = 2
n = 1
# The lambda reads `eps` at call time, so rebinding `eps = 0.` below
# yields the unperturbed limit.
u = lambda x,y : eps*sin(n*pi*y)*cos(m*pi*x) + sin(alpha*(y**2 - y)*cos(m*pi*x) + n*pi*y)
t = np.linspace(0.,1.,201)
x,y = np.meshgrid(t,t)
# ...
# plot of u
# ...
pl.contourf(x,y,u(x,y))
pl.colorbar()
# Raw strings replace plain literals with invalid "\s"/"\e" escapes; the
# original "\\alpha" becomes "\alpha" in the raw literal — the runtime
# string value is unchanged.
pl.title(r"plot of $u (x,y) = \epsilon \sin(n \pi y) \cos(m \pi x) + \sin( \alpha (y^2 - y) \cos(m \pi x) + n \pi y)$ ")
pl.show()
# ...
# ...
# plot of u - limit
# ...
eps = 0.  # eps -> 0 limit
pl.contourf(x,y,u(x,y))
pl.colorbar()
pl.title(r"plot of $u (x,y) = \sin( \alpha (y^2 - y) \cos(m \pi x) + n \pi y)$ ")
pl.show()
# ...
# ***************************************************
| 19.338462
| 120
| 0.378679
| 429
| 2,514
| 2.219114
| 0.097902
| 0.05042
| 0.07563
| 0.066176
| 0.845588
| 0.803571
| 0.781513
| 0.771008
| 0.704832
| 0.704832
| 0
| 0.031421
| 0.202466
| 2,514
| 129
| 121
| 19.488372
| 0.443392
| 0.301909
| 0
| 0.581818
| 0
| 0.127273
| 0.31162
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.054545
| 0
| 0.054545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
63642ac70eaf0f484bdb1bc41c87c8a1bb0eba97
| 14,562
|
py
|
Python
|
deepsleep/data_loader.py
|
UKBWorks/AccSleepNet
|
7c203590b1cfa24f12c197ea065d4dd308c92bf0
|
[
"Apache-2.0"
] | 2
|
2020-11-19T10:32:56.000Z
|
2021-02-23T07:29:32.000Z
|
deepsleep/data_loader.py
|
UKBWorks/AccSleepNet
|
7c203590b1cfa24f12c197ea065d4dd308c92bf0
|
[
"Apache-2.0"
] | 1
|
2021-01-28T23:16:09.000Z
|
2021-01-28T23:16:09.000Z
|
deepsleep/data_loader.py
|
famousgrouse/AccSleepNet
|
c343ac0411bd4ae3e2a0e82b47c398608d5ceb23
|
[
"Apache-2.0"
] | 2
|
2020-11-19T10:33:03.000Z
|
2021-02-23T07:29:33.000Z
|
import os
import numpy as np
from deepsleep.sleep_stage import print_n_samples_each_class
from deepsleep.utils import get_balance_class_oversample
import re
class NonSeqDataLoader(object):
    """Load sleep-staging .npz recordings as flat (non-sequential) arrays.

    Epochs from all recordings are stacked into single arrays shaped
    (n_examples, n_samples, 1, 1) for a conv2d-style model input.
    """

    def __init__(self, data_dir, n_folds, fold_idx):
        # data_dir: directory containing the .npz recordings.
        # n_folds: number of cross-validation folds.
        # fold_idx: fold currently held out for validation.
        self.data_dir = data_dir
        self.n_folds = n_folds
        self.fold_idx = fold_idx

    def _load_npz_file(self, npz_file):
        """Load data and labels from a npz file.

        Two on-disk schemas are supported: keys ("x", "y", "fs") or
        ("svm", "annotation", "fs"); both store the sampling rate as "fs".
        """
        with np.load(npz_file) as f:
            if ("x" in f.keys()):
                data = f["x"]
                labels = f["y"]
                sampling_rate = f["fs"]
            else:
                data = f["svm"]
                labels = f["annotation"]
                sampling_rate = f["fs"]
        return data, labels, sampling_rate

    def _load_npz_list_files(self, npz_files):
        """Load data and labels from list of npz files."""
        data = []
        labels = []
        fs = None
        for npz_f in npz_files:
            print ("Loading {} ...".format(npz_f))
            tmp_data, tmp_labels, sampling_rate = self._load_npz_file(npz_f)
            if fs is None:
                fs = sampling_rate
            elif fs != sampling_rate:
                # All files must share a single sampling rate.
                raise Exception("Found mismatch in sampling rate.")
            data.append(tmp_data)
            if len(tmp_labels.shape) > 1:
                # Collapse (n, 1)-shaped label arrays to 1-D before hstack.
                tmp_labels=np.squeeze(tmp_labels)
            labels.append(tmp_labels)
        # Stack all recordings into single flat arrays.
        data = np.vstack(data)
        labels = np.hstack(labels)
        return data, labels

    def _load_cv_data(self, list_files):
        """Load training and cross-validation sets."""
        # Split files for training and validation sets
        val_files = np.array_split(list_files, self.n_folds)
        train_files = np.setdiff1d(list_files, val_files[self.fold_idx])
        # Load a npz file
        print("Load training set:")
        data_train, label_train = self._load_npz_list_files(train_files)
        print(" ")
        print("Load validation set:")
        data_val, label_val = self._load_npz_list_files(val_files[self.fold_idx])
        print(" ")
        # Reshape the data to match the input of the model - conv2d
        data_train = np.squeeze(data_train)
        data_val = np.squeeze(data_val)
        data_train = data_train[:, :, np.newaxis, np.newaxis]
        data_val = data_val[:, :, np.newaxis, np.newaxis]
        # Casting
        data_train = data_train.astype(np.float32)
        label_train = label_train.astype(np.int32)
        data_val = data_val.astype(np.float32)
        label_val = label_val.astype(np.int32)
        return data_train, label_train, data_val, label_val

    def load_train_data(self, binary_sleep =True):
        """Load the class-balanced training set and the validation set.

        :param binary_sleep: when True, collapse all labels > 1 into class 1.
        :returns: (x_train, y_train, data_val, label_val)
        """
        # Remove non-mat files, and perform ascending sort
        allfiles = os.listdir(self.data_dir)
        npzfiles = []
        for idx, f in enumerate(allfiles):
            if ".npz" in f:
                npzfiles.append(os.path.join(self.data_dir, f))
        npzfiles.sort()
        subject_files = []
        for idx, f in enumerate(allfiles):
            if self.fold_idx < 10:
                #pattern = re.compile("[a-zA-Z0-9]*0{}[1-9]E0\.npz$".format(self.fold_idx))
                # the purpose of this line code is to specify which file should be considered as the validation
                # dataset
                pattern = re.compile("[a-zA-Z0-9. -_]*0{}[1-9]E0\.npz$".format(self.fold_idx))
            else:
                pattern = re.compile("[a-zA-Z0-9]*{}[1-9]E0\.npz$".format(self.fold_idx))
            # NOTE(review): `pattern` is compiled but never applied; validation
            # files are actually selected by the "VALIDATION" substring test
            # below — confirm whether regex-based selection was intended.
            if "VALIDATION" in f:
                subject_files.append(os.path.join(self.data_dir, f))
        if len(subject_files) == 0:
            # Fallback: try the "J0" file-naming scheme (pattern again unused).
            for idx, f in enumerate(allfiles):
                if self.fold_idx < 10:
                    pattern = re.compile("[a-zA-Z0-9]*0{}[1-9]J0\.npz$".format(self.fold_idx))
                else:
                    pattern = re.compile("[a-zA-Z0-9]*{}[1-9]J0\.npz$".format(self.fold_idx))
                if "VALIDATION" in f:
                    subject_files.append(os.path.join(self.data_dir, f))
        # Everything not selected for validation is used for training.
        train_files = list(set(npzfiles) - set(subject_files))
        train_files.sort()
        subject_files.sort()
        # Load training and validation sets
        print("\n========== [Fold-{}] ==========\n".format(self.fold_idx))
        print("Load training set:")
        data_train, label_train = self._load_npz_list_files(npz_files=train_files)
        print(" ")
        print("Load validation set:")
        data_val, label_val = self._load_npz_list_files(npz_files=subject_files)
        print(" ")
        # Reshape the data to match the input of the model - conv2d
        data_train = np.squeeze(data_train)
        data_val = np.squeeze(data_val)
        data_train = data_train[:, :, np.newaxis, np.newaxis]
        data_val = data_val[:, :, np.newaxis, np.newaxis]
        # Casting
        data_train = data_train.astype(np.float32)
        label_train = label_train.astype(np.int32)
        data_val = data_val.astype(np.float32)
        label_val = label_val.astype(np.int32)
        if binary_sleep:
            # Collapse all stages above 1 into a single "sleep" class.
            label_train[label_train > 1] = 1
            label_val[label_val > 1] = 1
        print("Training set: {}, {}".format(data_train.shape, label_train.shape))
        print_n_samples_each_class(label_train, binary=binary_sleep)
        print(" ")
        print("Validation set: {}, {}".format(data_val.shape, label_val.shape))
        print_n_samples_each_class(label_val, binary=binary_sleep)
        print(" ")
        # Use balanced-class, oversample training set
        x_train, y_train = get_balance_class_oversample(
            x=data_train, y=label_train
        )
        print("Oversampled training set: {}, {}".format(
            x_train.shape, y_train.shape
        ))
        print_n_samples_each_class(y_train, binary=binary_sleep)
        print(" ")
        return x_train, y_train, data_val, label_val

    def load_test_data(self):
        """Load only the validation/test files for this fold.

        :returns: (data_val, label_val) shaped/cast like the training data.
        """
        # Remove non-mat files, and perform ascending sort
        allfiles = os.listdir(self.data_dir)
        npzfiles = []
        for idx, f in enumerate(allfiles):
            if ".npz" in f:
                npzfiles.append(os.path.join(self.data_dir, f))
        npzfiles.sort()
        subject_files = []
        for idx, f in enumerate(allfiles):
            if self.fold_idx < 10:
                pattern = re.compile("[a-zA-Z0-9]*0{}[1-9]E0\.npz$".format(self.fold_idx))
            else:
                pattern = re.compile("[a-zA-Z0-9]*{}[1-9]E0\.npz$".format(self.fold_idx))
            # NOTE(review): `pattern` unused — selection is by substring below.
            if "VALIDATION" in f:
                subject_files.append(os.path.join(self.data_dir, f))
        subject_files.sort()
        print("\n========== [Fold-{}] ==========\n".format(self.fold_idx))
        print("Load validation set:")
        data_val, label_val = self._load_npz_list_files(subject_files)
        # Reshape the data to match the input of the model
        data_val = np.squeeze(data_val)
        data_val = data_val[:, :, np.newaxis, np.newaxis]
        # Casting
        data_val = data_val.astype(np.float32)
        label_val = label_val.astype(np.int32)
        return data_val, label_val
class SeqDataLoader(object):
    """Load sleep-staging .npz recordings as per-subject sequences.

    Unlike NonSeqDataLoader, data are kept as a list of per-subject arrays
    (one entry per recording) instead of being stacked into one flat array.
    """

    def __init__(self, data_dir, n_folds, fold_idx):
        # data_dir: directory containing the .npz recordings.
        # n_folds: number of cross-validation folds.
        # fold_idx: fold currently held out for validation.
        self.data_dir = data_dir
        self.n_folds = n_folds
        self.fold_idx = fold_idx

    def _load_npz_file(self, npz_file):
        """Load data and labels from a npz file.

        Expects the ("svm", "annotation", "fs") schema only.
        """
        with np.load(npz_file) as f:
            data = f["svm"]
            labels = f["annotation"]
            sampling_rate = f["fs"]
        return data, labels, sampling_rate

    def _load_npz_list_files(self, npz_files):
        """Load data and labels from list of npz files."""
        data = []
        labels = []
        fs = None
        for npz_f in npz_files:
            print("Loading {} ...".format(npz_f))
            tmp_data, tmp_labels, sampling_rate = self._load_npz_file(npz_f)
            if fs is None:
                fs = sampling_rate
            elif fs != sampling_rate:
                # All files must share a single sampling rate.
                raise Exception("Found mismatch in sampling rate.")
            # Reshape the data to match the input of the model - conv2d
            tmp_data = np.squeeze(tmp_data)
            tmp_data = tmp_data[:, :, np.newaxis, np.newaxis]
            # # Reshape the data to match the input of the model - conv1d
            # tmp_data = tmp_data[:, :, np.newaxis]
            # Casting
            tmp_data = tmp_data.astype(np.float32)
            tmp_labels = tmp_labels.astype(np.int32)
            data.append(tmp_data)
            labels.append(tmp_labels)
        # Per-subject lists are returned as-is (no vstack/hstack).
        return data, labels

    def _load_cv_data(self, list_files):
        """Load sequence training and cross-validation sets."""
        # Split files for training and validation sets
        val_files = np.array_split(list_files, self.n_folds)
        train_files = np.setdiff1d(list_files, val_files[self.fold_idx])
        # Load a npz file
        print("Load training set:")
        data_train, label_train = self._load_npz_list_files(train_files)
        print(" ")
        print("Load validation set:")
        data_val, label_val = self._load_npz_list_files(val_files[self.fold_idx])
        print(" ")
        return data_train, label_train, data_val, label_val

    def load_test_data(self):
        """Load the validation files of this fold.

        NOTE(review): selection here is by fold-splitting the file list,
        not by the "VALIDATION" naming used elsewhere — confirm intent.
        """
        # Remove non-mat files, and perform ascending sort
        allfiles = os.listdir(self.data_dir)
        npzfiles = []
        for idx, f in enumerate(allfiles):
            if ".npz" in f:
                npzfiles.append(os.path.join(self.data_dir, f))
        npzfiles.sort()
        # Files for validation sets
        val_files = np.array_split(npzfiles, self.n_folds)
        val_files = val_files[self.fold_idx]
        print("\n========== [Fold-{}] ==========\n".format(self.fold_idx))
        print("Load validation set:")
        data_val, label_val = self._load_npz_list_files(val_files)
        return data_val, label_val

    def load_train_data(self, n_files=None, binary_sleep = True):
        """Load per-subject training and validation sequences.

        :param n_files: optional cap on the number of files considered.
        :param binary_sleep: when True, collapse labels > 0 into class 1.
            NOTE(review): NonSeqDataLoader thresholds at > 1 — confirm
            whether the differing thresholds are intentional.
        :returns: (data_train, label_train, data_val, label_val) as lists.
        """
        # Remove non-mat files, and perform ascending sort
        allfiles = os.listdir(self.data_dir)
        npzfiles = []
        for idx, f in enumerate(allfiles):
            if ".npz" in f:
                npzfiles.append(os.path.join(self.data_dir, f))
        npzfiles.sort()
        if n_files is not None:
            npzfiles = npzfiles[:n_files]
        subject_files = []
        for idx, f in enumerate(allfiles):
            if self.fold_idx < 10:
                pattern = re.compile("[a-zA-Z0-9]*0{}[1-9]E0\.npz$".format(self.fold_idx))
            else:
                pattern = re.compile("[a-zA-Z0-9]*{}[1-9]E0\.npz$".format(self.fold_idx))
            # NOTE(review): `pattern` is compiled but never applied; files
            # are selected by the "VALIDATION" substring below.
            if "VALIDATION" in f:
                subject_files.append(os.path.join(self.data_dir, f))
        # Everything not selected for validation is used for training.
        train_files = list(set(npzfiles) - set(subject_files))
        train_files.sort()
        subject_files.sort()
        # Load training and validation sets
        print("\n========== [Fold-{}] ==========\n".format(self.fold_idx))
        print("Load training set:")
        data_train, label_train = self._load_npz_list_files(train_files)
        print(" ")
        print("Load validation set:")
        data_val, label_val = self._load_npz_list_files(subject_files)
        print(" ")
        if binary_sleep:
            # Collapse all non-zero stages into a single "sleep" class.
            for y in label_train:
                y[y > 0] = 1
            for y in label_val:
                y[y > 0] = 1
        print("Training set: n_subjects={}".format(len(data_train)))
        n_train_examples = 0
        for d in data_train:
            print(d.shape)
            n_train_examples += d.shape[0]
        print("Number of examples = {}".format(n_train_examples))
        print_n_samples_each_class(np.hstack(label_train), binary=binary_sleep)
        print(" ")
        print("Validation set: n_subjects={}".format(len(data_val)))
        n_valid_examples = 0
        for d in data_val:
            print(d.shape)
            n_valid_examples += d.shape[0]
        print ("Number of examples = {}".format(n_valid_examples))
        print_n_samples_each_class(np.hstack(label_val), binary=binary_sleep)
        print (" ")
        return data_train, label_train, data_val, label_val

    @staticmethod
    def load_subject_data(data_dir, subject_idx):
        """Load the sequence data of a single subject.

        :param data_dir: directory containing the .npz recordings.
        :param subject_idx: subject index used to build the (unused) regex.
        :returns: (data, labels) lists as produced by the nested loader.
        :raises Exception: when no file or more than two files match.
        """
        # Remove non-mat files, and perform ascending sort
        allfiles = os.listdir(data_dir)
        subject_files = []
        for idx, f in enumerate(allfiles):
            if subject_idx < 10:
                pattern = re.compile("[a-zA-Z0-9]*0{}[1-9]E0\.npz$".format(subject_idx))
            else:
                pattern = re.compile("[a-zA-Z0-9]*{}[1-9]E0\.npz$".format(subject_idx))
            # NOTE(review): `pattern` unused — selection is by substring below.
            if "VALIDATION" in f:
                subject_files.append(os.path.join(data_dir, f))
        # Files for validation sets
        if len(subject_files) == 0 or len(subject_files) > 2:
            raise Exception("Invalid file pattern")

        def load_npz_file(npz_file):
            """Load data and labels from a npz file."""
            with np.load(npz_file) as f:
                data = f["svm"]
                labels = f["annotation"]
                sampling_rate = f["fs"]
            return data, labels, sampling_rate

        def load_npz_list_files(npz_files):
            """Load data and labels from list of npz files."""
            data = []
            labels = []
            fs = None
            for npz_f in npz_files:
                print ("Loading {} ...".format(npz_f))
                tmp_data, tmp_labels, sampling_rate = load_npz_file(npz_f)
                if fs is None:
                    fs = sampling_rate
                elif fs != sampling_rate:
                    raise Exception("Found mismatch in sampling rate.")
                # Reshape the data to match the input of the model - conv2d
                tmp_data = np.squeeze(tmp_data)
                tmp_data = tmp_data[:, :, np.newaxis, np.newaxis]
                # # Reshape the data to match the input of the model - conv1d
                # tmp_data = tmp_data[:, :, np.newaxis]
                # Casting
                tmp_data = tmp_data.astype(np.float32)
                tmp_labels = tmp_labels.astype(np.int32)
                data.append(tmp_data)
                labels.append(tmp_labels)
            return data, labels

        print("Load data from: {}".format(subject_files))
        data, labels = load_npz_list_files(subject_files)
        return data, labels
| 37.530928
| 111
| 0.57602
| 1,916
| 14,562
| 4.146138
| 0.075679
| 0.029079
| 0.033233
| 0.028197
| 0.85788
| 0.844285
| 0.824396
| 0.798968
| 0.798464
| 0.764602
| 0
| 0.012208
| 0.308131
| 14,562
| 387
| 112
| 37.627907
| 0.776278
| 0.107265
| 0
| 0.754448
| 0
| 0.003559
| 0.084474
| 0.019107
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053381
| false
| 0
| 0.017794
| 0
| 0.124555
| 0.163701
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
63735b230f253b622d50d6278e994171e220f7cd
| 6,259
|
py
|
Python
|
test/test_message.py
|
wenbo1188/ozone
|
962f9bfbbe4ea29eb7cb50eff8058806efee7143
|
[
"MIT"
] | null | null | null |
test/test_message.py
|
wenbo1188/ozone
|
962f9bfbbe4ea29eb7cb50eff8058806efee7143
|
[
"MIT"
] | 8
|
2018-03-24T01:44:42.000Z
|
2018-08-25T06:43:49.000Z
|
test/test_message.py
|
wenbo1188/ozone
|
962f9bfbbe4ea29eb7cb50eff8058806efee7143
|
[
"MIT"
] | null | null | null |
from . import app, client, prepare
from ozone.config import TestConfig
from . import (assertTrue_status, assertFalse_status, assertTrue_content, assertFalse_content,
assertTrue_session, assertFalse_session)
import pytest
from ozone.utils.db_util import get_db
def test_show_message_without_login(client):
    """An anonymous request for a message page is redirected to the login prompt."""
    response = client.get('/message/1', follow_redirects=True)
    assertTrue_status(response, 200)
    assertTrue_content(response, "You need login to continue")
@pytest.mark.parametrize(("url", "content", "username", "password"), (
    ('/message/0', "再翻也没有啦", TestConfig.USERNAME1, TestConfig.PASSWORD),
    ('/message/1', "再翻也没有啦", TestConfig.USERNAME1, TestConfig.PASSWORD),
    ('/message/100', "再翻也没有啦", TestConfig.USERNAME1, TestConfig.PASSWORD),
    ('/message/0', "再翻也没有啦", TestConfig.USERNAME2, TestConfig.PASSWORD),
    ('/message/1', "再翻也没有啦", TestConfig.USERNAME2, TestConfig.PASSWORD),
    ('/message/100', "再翻也没有啦", TestConfig.USERNAME2, TestConfig.PASSWORD)
))
def test_show_message_login(client, prepare, url, content, username, password):
    """With no messages posted, every page shows the end-of-list marker."""
    prepare.login(username, password)
    response = client.get(url)
    assertTrue_status(response, 200)
    assertTrue_content(response, content)
@pytest.mark.parametrize(("url", "content", "username", "password"), (
    ('/message/0', "再翻也没有啦", TestConfig.USERNAME1, TestConfig.PASSWORD),
    ('/message/1', "test message", TestConfig.USERNAME1, TestConfig.PASSWORD),
    ('/message/100', "再翻也没有啦", TestConfig.USERNAME1, TestConfig.PASSWORD),
    ('/message/0', "再翻也没有啦", TestConfig.USERNAME2, TestConfig.PASSWORD),
    ('/message/1', "test message", TestConfig.USERNAME2, TestConfig.PASSWORD),
    ('/message/100', "再翻也没有啦", TestConfig.USERNAME2, TestConfig.PASSWORD)
))
def test_show_message_login_message(client, prepare, url, content, username, password):
    """After posting five messages, page 1 lists them; other pages stay empty."""
    prepare.login(username, password)
    prepare.add_message(5, repeat=False)
    response = client.get(url)
    assertTrue_status(response, 200)
    if content != "test message":
        assertTrue_content(response, content)
    else:
        # Messages are rendered as "test message_0" .. "test message_4".
        for idx in range(5):
            assertTrue_content(response, "{}_{}".format(content, idx))
def test_add_message_without_login(client, app):
    """Anonymous GET/POST on the add-message route is rejected and nothing is stored."""
    response = client.get('/message/add', follow_redirects=True)
    assertTrue_status(response, 200)
    assertFalse_content(response, "提交")
    assertTrue_content(response, "You need login to continue")
    response = client.post('/message/add', data={
        "content":"test message"
    }, follow_redirects=True)
    assertTrue_status(response, 200)
    assertTrue_content(response, "You need login to continue")
    with app.app_context():
        db = get_db()
        row = db.cursor().execute("select * from message where content = ?",
                                  ["test message",]).fetchone()
        assert row is None
@pytest.mark.parametrize(("username", "password"), (
    (TestConfig.USERNAME1, TestConfig.PASSWORD),
    (TestConfig.USERNAME2, TestConfig.PASSWORD)
))
def test_add_message_login(prepare, client, app, username, password):
    """A logged-in user can open the form and post a message that reaches the DB."""
    prepare.login(username, password)
    response = client.get('/message/add')
    assertTrue_status(response, 200)
    assertTrue_content(response, "提交")
    response = client.post('/message/add', data={
        "content":"test message"
    }, follow_redirects=True)
    assertTrue_status(response, 200)
    # (sic) "successfully leave" is the application's actual response text.
    assertTrue_content(response, "You have successfully leave a message")
    with app.app_context():
        db = get_db()
        row = db.cursor().execute('select * from message where content = ?',
                                  ["test message",]).fetchone()
        assert row is not None
@pytest.mark.parametrize(("url", "status_code", "content"), (
    ("/message/update/1", 200, "You need login to continue"),
    ("/message/update", 404, "")
))
def test_update_message_without_login(client, url, status_code, content):
    """Anonymous update attempts get a login redirect or a 404."""
    response = client.get(url, follow_redirects=True)
    assertTrue_status(response, status_code)
    assertTrue_content(response, content)
@pytest.mark.parametrize(("username", "password"), (
    (TestConfig.USERNAME1, TestConfig.PASSWORD),
    (TestConfig.USERNAME2, TestConfig.PASSWORD)
))
def test_update_message_login(client, prepare, app, username, password):
    """A logged-in user can update their message, addressed by its timestamp."""
    prepare.login(username, password)
    prepare.add_message(num=1, content="test message old")
    with app.app_context():
        db = get_db()
        res = db.cursor().execute("select timestamp from message where content = ?",
                                  ["test message old",]).fetchone()
        assert res is not None
    # Routes without a valid timestamp must return 404.
    response = client.get('/message/update', follow_redirects=True)
    assertTrue_status(response, 404)
    response = client.get('/message/update/1', follow_redirects=True)
    assertTrue_status(response, 404)
    response = client.get('/message/update/{}'.format(res["timestamp"]), follow_redirects=True)
    assertTrue_status(response, 200)
    assertTrue_content(response, "test message old")
    response = client.post('/message/update/{}'.format(res["timestamp"]), data={
        "content":"test message new"
    }, follow_redirects=True)
    assertTrue_status(response, 200)
    # (sic) "udpate" mirrors the application's actual response text.
    assertTrue_content(response, "You have successfully udpate a message")
    assertTrue_content(response, "test message new")
@pytest.mark.parametrize(("url", "status_code", "content"), (
    ("/message/delete", 404, ""),
    ("/message/delete/1", 200, "You need login to continue")
))
def test_delete_message_without_login(client, url, status_code, content):
    """Anonymous delete attempts get a login redirect or a 404."""
    response = client.get(url, follow_redirects=True)
    assertTrue_status(response, status_code)
    assertTrue_content(response, content)
@pytest.mark.parametrize(("username", "password"), (
    (TestConfig.USERNAME1, TestConfig.PASSWORD),
    (TestConfig.USERNAME2, TestConfig.PASSWORD)
))
def test_delete_message_login(client, prepare, app, username, password):
    """A logged-in user can delete their message; it then disappears from the page."""
    prepare.login(username, password)
    prepare.add_message()
    with app.app_context():
        db = get_db()
        res = db.cursor().execute("select timestamp from message where content = ?",
                                  ["test message",]).fetchone()
        assert res is not None
    # Routes without a valid timestamp must return 404.
    response = client.get('/message/delete')
    assertTrue_status(response, 404)
    response = client.get('/message/delete/1')
    assertTrue_status(response, 404)
    response = client.get('/message/delete/{}'.format(res["timestamp"]), follow_redirects=True)
    assertTrue_status(response, 200)
    assertTrue_content(response, "You have successfully delete a message")
    assertFalse_content(response, "test message")
| 38.875776
| 95
| 0.690685
| 740
| 6,259
| 5.705405
| 0.112162
| 0.03837
| 0.068214
| 0.075557
| 0.833965
| 0.806727
| 0.803884
| 0.760066
| 0.711985
| 0.671009
| 0
| 0.01911
| 0.163924
| 6,259
| 160
| 96
| 39.11875
| 0.787693
| 0
| 0
| 0.589552
| 0
| 0
| 0.196548
| 0
| 0
| 0
| 0
| 0
| 0.283582
| 1
| 0.067164
| false
| 0.246269
| 0.037313
| 0
| 0.104478
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
638a930b9b8bd7b76f8cfa983a6a93f78551aa97
| 7,190
|
py
|
Python
|
objects/CSCG/_3d/forms/trace/base/numbering/Naive.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | 1
|
2020-10-14T12:48:35.000Z
|
2020-10-14T12:48:35.000Z
|
objects/CSCG/_3d/forms/trace/base/numbering/Naive.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | null | null | null |
objects/CSCG/_3d/forms/trace/base/numbering/Naive.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: Yi Zhang.
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft, Delft, Netherlands
"""
from screws.freeze.main import FrozenOnly
from tools.linear_algebra.gathering.regular.chain_matrix.main import Gathering_Matrix, Gathering_Vector
class _3dCSCG_Trace_Form_Numbering_Naive(FrozenOnly):
    """Naive numbering scheme for 3d CSCG trace forms.

    Dofs are numbered sequentially over local trace elements
    (trace-element-wise, "TEW"), then gathered per mesh element through the
    trace-element map.  The parameter-free numbering is identical for the
    0-, 1- and 2-trace variants, so it is factored into one shared helper.
    """

    def __init__(self, tf):
        # tf: the trace form to be numbered.
        self._tf_ = tf
        self._mesh_ = tf.mesh
        self._freeze_self_()

    def ___PRIVATE_naive_numbering_no_parameters___(self):
        """Shared parameter-free naive numbering (same for 0/1/2-trace forms).

        :returns: A tuple of 4 outputs:

            1. (Gathering_Matrix) -- The global numbering in local elements.
            2. dict -- The global numbering in local trace elements.
            3. (int) -- Number of dofs in this core.
            4. (None,...) -- Extra numbering information.
        """
        GM = dict()
        GM_TEW = dict()
        local_num_dofs = 0
        extraInfo = None
        num_basis_onside = self._tf_.num.basis_onside
        # Basis counts keyed by side type: 'N' (NS pair), 'W' (WE), 'B' (BF).
        NBO = [num_basis_onside['N'], num_basis_onside['W'], num_basis_onside['B']]
        type_amount_dict = self._mesh_.trace.elements.___PRIVATE_find_type_and_amount_numbered_before___()
        for i in self._mesh_.trace.elements:
            t_e_i = self._mesh_.trace.elements[i]
            am_NS, am_WE, am_BF = type_amount_dict[i]
            # First global dof of trace element i == total dofs numbered before it.
            start_num = am_NS * NBO[0] + am_WE * NBO[1] + am_BF * NBO[2]
            GM_TEW[i] = range(start_num, start_num + num_basis_onside[t_e_i.CHARACTERISTIC_side])
            local_num_dofs += num_basis_onside[t_e_i.CHARACTERISTIC_side]
        # Gather, per mesh element, the numbering of its surrounding trace elements.
        MAP = self._mesh_.trace.elements.map
        for i in MAP:
            vector = tuple()
            for t_j in MAP[i]:
                vector += (GM_TEW[t_j],)
            GM[i] = Gathering_Vector(i, vector)
        GM = Gathering_Matrix(GM, mesh_type='_3dCSCG')
        # Wrap the per-trace-element ranges as Gathering_Vectors as well.
        for i in GM_TEW:
            GM_TEW[i] = Gathering_Vector(i, GM_TEW[i])
        return GM, GM_TEW, local_num_dofs, extraInfo

    def _3dCSCG_0Trace(self):
        """Do the numbering if it is a trace 0-form:
        :class:`_3dCSCG.form.standard._0_trace._0Trace`.

        :returns: (GM, GM_TEW, local_num_dofs, extraInfo); see the helper.
        """
        if self._tf_.numbering._parameters_ == dict():
            return self._0Trace_no_parameters()
        else:
            raise NotImplementedError()

    def _0Trace_no_parameters(self):
        """Parameter-free numbering for a trace 0-form."""
        return self.___PRIVATE_naive_numbering_no_parameters___()

    def _3dCSCG_1Trace(self):
        """Do the numbering if it is a trace 1-form:
        :class:`_3dCSCG.form.standard._1_trace._1Trace`.

        BUGFIX: this previously dispatched to ``_0Trace_no_parameters`` —
        harmless only because the bodies were identical; it now calls the
        1-trace method.
        """
        if self._tf_.numbering._parameters_ == dict():
            return self._1Trace_no_parameters()
        else:
            raise NotImplementedError()

    def _1Trace_no_parameters(self):
        """Parameter-free numbering for a trace 1-form."""
        return self.___PRIVATE_naive_numbering_no_parameters___()

    def _3dCSCG_2Trace(self):
        """Do the numbering if it is a trace 2-form:
        :class:`_3dCSCG.form.standard._2_trace._2Trace`.

        :returns: (GM, GM_TEW, local_num_dofs, extraInfo); see the helper.
        """
        if self._tf_.numbering._parameters_ == dict():
            return self._2Trace_no_parameters()
        else:
            raise NotImplementedError()

    def _2Trace_no_parameters(self):
        """Parameter-free numbering for a trace 2-form."""
        return self.___PRIVATE_naive_numbering_no_parameters___()
| 34.567308
| 106
| 0.600556
| 966
| 7,190
| 4.144928
| 0.112836
| 0.026224
| 0.073427
| 0.05994
| 0.907093
| 0.907093
| 0.896354
| 0.883367
| 0.883367
| 0.883367
| 0
| 0.016393
| 0.295828
| 7,190
| 208
| 107
| 34.567308
| 0.774442
| 0.317107
| 0
| 0.851064
| 0
| 0
| 0.006664
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074468
| false
| 0
| 0.021277
| 0
| 0.170213
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8923e024edd6df93e1253d22b384cf2d1a564183
| 80
|
py
|
Python
|
pydrot/drot/__init__.py
|
vienmai/drot
|
a5ffa08d6911c7245984d3b0e5835f90767801f4
|
[
"MIT"
] | null | null | null |
pydrot/drot/__init__.py
|
vienmai/drot
|
a5ffa08d6911c7245984d3b0e5835f90767801f4
|
[
"MIT"
] | null | null | null |
pydrot/drot/__init__.py
|
vienmai/drot
|
a5ffa08d6911c7245984d3b0e5835f90767801f4
|
[
"MIT"
] | null | null | null |
from .solver import drot
from .solver import sinkhorn
from .solver import PDHG
| 16
| 28
| 0.8
| 12
| 80
| 5.333333
| 0.5
| 0.46875
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1625
| 80
| 4
| 29
| 20
| 0.955224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8937527e6f55cef6b12117e82af5d511d005940f
| 20,771
|
py
|
Python
|
Manuscript_plots.py
|
DingLyu/Investigating-and-Modeling-the-Dynamics-of-Long-Ties
|
aa37c3d5c85a8d1696db3dda7dcb22782b737d17
|
[
"MIT"
] | null | null | null |
Manuscript_plots.py
|
DingLyu/Investigating-and-Modeling-the-Dynamics-of-Long-Ties
|
aa37c3d5c85a8d1696db3dda7dcb22782b737d17
|
[
"MIT"
] | null | null | null |
Manuscript_plots.py
|
DingLyu/Investigating-and-Modeling-the-Dynamics-of-Long-Ties
|
aa37c3d5c85a8d1696db3dda7dcb22782b737d17
|
[
"MIT"
] | null | null | null |
# /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: ding lyu
@email: dylan_lyu@sjtu.edu.cn
"""
from utils import *
# Fig.~2
def DynamicTrend(interactions):
    """Plot the per-phase average log-interaction trend by phase-1 tie range (Fig. 2).

    Parameters
    ----------
    interactions : str
        Either 'Duration' or 'Frequency'; selects the result file and the
        axis limits/margins used for that figure.
    """
    Data = read('Results/Graph_Season_TR_{}.txt'.format(interactions))
    # Restrict to dyads inside the largest connected component of the
    # phase-1 frequency graph.
    GH = nx.read_gexf('Graph/Season/Frequency/G_frequency_season_1.gexf')
    Large = max(nx.connected_components(GH), key=len)
    # buckets[k] holds the per-phase values (columns 10+) for ties whose
    # phase-1 tie range is k+2; bucket 4 pools every range >= 6.
    buckets = [[] for _ in range(5)]
    for data in Data:
        if data.count('-1') == 0 and data[0] in Large and data[1] in Large:
            if data[2] in ('2', '3', '4', '5'):
                buckets[int(data[2]) - 2].append(list(map(int, map(float, data[10:]))))
            # NOTE(review): the upper bound is `< 100` here but `<= 100` in
            # PersistentProbability/IncrementInteraction — confirm which is
            # intended.
            elif 6 <= int(data[2]) < 100:
                buckets[4].append(list(map(int, map(float, data[10:]))))
    fig = plt.figure(figsize=(7, 7))
    ax = plt.axes()
    # (color, marker, legend label) per tie-range series, in bucket order.
    series_style = [
        ('#34495e', '^', r'$=2$'),
        ('#2980b9', 's', r'$=3$'),
        ('#7f8c8d', 'p', r'$=4$'),
        ('#c0392b', 'H', r'$=5$'),
        ('#8e44ad', '8', r'$\geq6$'),
    ]
    X = [1, 2, 3, 4, 5, 6, 7, 8]
    for rows, (color, marker, label) in zip(buckets, series_style):
        Y, Err = [], []
        for i in range(8):
            vals = [mt.log(row[i] + 1) for row in rows]
            Y.append(np.mean(vals))
            Err.append(mean_confidence_interval(vals))
        plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color=color, ls='-', label=label,
                     ecolor=color, marker=marker, markersize=10, linewidth=2,
                     elinewidth=0.5, capsize=4)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.xticks([1, 2, 3, 4, 5, 6, 7, 8], ['1', '2', '3', '4', '5', '6', '7', '8'], fontsize=20)
    plt.yticks(fontsize=20)
    plt.xlabel("Phase", fontsize=25)
    plt.ylabel("$log$(Interaction {})".format(interactions), fontsize=25)
    legend = plt.legend(frameon=False, loc='upper right', title='Tie Range in Phase 1', fontsize=20)
    legend.get_title().set_fontsize(fontsize=20)
    # Figure-specific limits and margins per interaction type.
    if interactions == 'Duration':
        plt.ylim([0, 6])
        plt.subplots_adjust(left=0.11, bottom=0.11, right=0.98, top=0.97)
    if interactions == 'Frequency':
        plt.ylim([0, 2.5])
        plt.subplots_adjust(left=0.145, bottom=0.11, right=0.98, top=0.97)
    plt.show()
# Fig.~3a
def PersistentProbability():
    """Plot per-phase tie persistence probability by phase-1 tie range (Fig. 3a)."""
    Data = read('Results/Graph_Season_TR_Frequency.txt')
    avg2, avg3, avg4, avg5, avg6 = [], [], [], [], []
    # Restrict to dyads inside the largest connected component of the
    # phase-1 frequency graph.
    GH = nx.read_gexf('Graph/Season/Frequency/G_frequency_season_1.gexf')
    Large = max(nx.connected_components(GH), key=len)
    for data in Data:
        if data.count('-1') == 0:
            if data[0] in Large and data[1] in Large:
                # Bucket rows by phase-1 tie range (column 2); columns 10+
                # hold the per-phase interaction values. avg6 pools 6..100.
                if data[2] == '2':
                    avg2.append(list(map(int, map(float, data[10:]))))
                if data[2] == '3':
                    avg3.append(list(map(int, map(float, data[10:]))))
                if data[2] == '4':
                    avg4.append(list(map(int, map(float, data[10:]))))
                if data[2] == '5':
                    avg5.append(list(map(int, map(float, data[10:]))))
                if int(data[2]) >= 6 and int(data[2]) <= 100:
                    avg6.append(list(map(int, map(float, data[10:]))))
    # H rows: [fraction of bucket still active in phase _+2, series label, phase].
    H = []
    Count = [0 for _ in range(7)]
    for data in avg2:
        for _ in range(7):
            if data[_ + 1] == 0:
                Count[_] += 1
    for _ in range(7):
        H.append([1 - Count[_] / len(avg2), '$=2$', int(_ + 2)])
    Count = [0 for _ in range(7)]
    for data in avg3:
        for _ in range(7):
            if data[_ + 1] == 0:
                Count[_] += 1
    for _ in range(7):
        H.append([1 - Count[_] / len(avg3), '$=3$', int(_ + 2)])
    Count = [0 for _ in range(7)]
    for data in avg4:
        for _ in range(7):
            if data[_ + 1] == 0:
                Count[_] += 1
    for _ in range(7):
        H.append([1 - Count[_] / len(avg4), '$=4$', int(_ + 2)])
    Count = [0 for _ in range(7)]
    for data in avg5:
        for _ in range(7):
            if data[_ + 1] == 0:
                Count[_] += 1
    for _ in range(7):
        H.append([1 - Count[_] / len(avg5), '$=5$', int(_ + 2)])
    Count = [0 for _ in range(7)]
    for data in avg6:
        for _ in range(7):
            if data[_ + 1] == 0:
                Count[_] += 1
    for _ in range(7):
        H.append([1 - Count[_] / len(avg6), '$\geq6$', int(_ + 2)])
    # Every tie exists in phase 1 by construction, so persistence is 1 there.
    H.append([1, '$=2$', 1])
    H.append([1, '$=3$', 1])
    H.append([1, '$=4$', 1])
    H.append([1, '$=5$', 1])
    H.append([1, '$\geq6$', 1])
    fig = plt.figure(figsize=(7, 7))
    ax = plt.axes()
    # NOTE(review): the Y series below are hard-coded constants; the H table
    # computed above is never plotted. Presumably these values were pasted
    # from an earlier run of the same computation — confirm they match H.
    X =[1,2,3,4,5,6,7,8]
    Y = [1,0.5581061427155416,0.4903426618125226,0.4811593128854058,0.42835042473760754,0.39420158284622064,0.3522280583114866,0.34217701568950765]
    plt.plot(X,Y,color='#34495e',label='$=2$',marker='^',markersize=10, linewidth=2)
    Y = [1,0.29881545026224554,0.23813227842474882,0.2341763712329985,0.1989287936705485,0.18211618810560937,0.15236909947550892,0.1509023024268824]
    plt.plot(X, Y, color='#2980b9', label='$=3$', marker='s', markersize=10, linewidth=2)
    Y = [1,0.3226499719678565,0.266305363483461,0.25456301002927806 , 0.22232604497601693, 0.20460350090325796, 0.17548121846383857, 0.17124525010901392]
    plt.plot(X, Y, color='#7f8c8d', label='$=4$', marker='p', markersize=10, linewidth=2)
    Y = [1,0.4494784876140808,0.42046936114732725, 0.39374185136897, 0.3539765319426337, 0.34680573663624514, 0.3158409387222947, 0.29269882659713164]
    plt.plot(X, Y, color='#c0392b', label='$=5$', marker='H', markersize=10, linewidth=2)
    Y = [1,0.6121212121212121,0.5454545454545454, 0.5212121212121212, 0.49696969696969695, 0.49696969696969695, 0.48484848484848486,0.4666666666666667]
    plt.plot(X, Y, color='#8e44ad', label='$\geq6$', marker='8', markersize=10, linewidth=2)
    plt.xlabel('Phase', fontsize=25)
    plt.ylabel('Persistence Probability', fontsize=25)
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    plt.ylim([0, 1.05])
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    legend = plt.legend(frameon=False, loc='upper right', title='Tie Range in Phase 1', fontsize=20)
    legend.get_title().set_fontsize(fontsize=20)
    plt.subplots_adjust(left=0.145, bottom=0.11, right=0.98, top=0.97)
    plt.savefig('Plots/Manuscript/PersistentProbability.pdf', format='pdf')
    plt.show()
# Fig.~3b&c
def IncrementInteraction(interactions):
    """Plot the per-phase change in log-interaction relative to phase 1,
    by phase-1 tie range (Fig. 3b&c).

    Parameters
    ----------
    interactions : str
        Either 'Duration' or 'Frequency'; selects the result file and the
        output PDF path.
    """
    Data = read('Results/Graph_Season_TR_{}.txt'.format(interactions))
    # Restrict to dyads inside the largest connected component of the
    # phase-1 frequency graph.
    GH = nx.read_gexf('Graph/Season/Frequency/G_frequency_season_1.gexf')
    Large = max(nx.connected_components(GH), key=len)
    # buckets[k] holds per-phase values (columns 10+) for ties whose phase-1
    # tie range is k+2; bucket 4 pools ranges 6..100.
    buckets = [[] for _ in range(5)]
    for data in Data:
        if data.count('-1') == 0 and data[0] in Large and data[1] in Large:
            if data[2] in ('2', '3', '4', '5'):
                buckets[int(data[2]) - 2].append(list(map(int, map(float, data[10:]))))
            elif 6 <= int(data[2]) <= 100:
                buckets[4].append(list(map(int, map(float, data[10:]))))
    plt.figure(figsize=(7, 7))
    ax = plt.axes()
    # (color, marker, legend label) per tie-range series, in bucket order.
    series_style = [
        ('#34495e', '^', r'$=2$'),
        ('#2980b9', 's', r'$=3$'),
        ('#7f8c8d', 'p', r'$=4$'),
        ('#c0392b', 'H', r'$=5$'),
        ('#8e44ad', '8', r'$\geq6$'),
    ]
    X = [1, 2, 3, 4, 5, 6, 7, 8]
    for rows, (color, marker, label) in zip(buckets, series_style):
        Y, Err = [], []
        for i in range(8):
            # Only ties still active in phase i+1 contribute; the increment
            # is measured against the same tie's phase-1 value.
            vals = [mt.log(row[i] + 1) - mt.log(row[0] + 1) for row in rows if row[i] != 0]
            Y.append(np.mean(vals))
            Err.append(mean_confidence_interval(vals))
        plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color=color, ls='-', label=label,
                     ecolor=color, marker=marker, markersize=10, linewidth=2,
                     elinewidth=0.5, capsize=4)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.xlabel('Phase', fontsize=25)
    plt.ylabel(r'$\Delta log$(Interaction {})'.format(interactions), fontsize=25)
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    plt.ylim([-0.4, 0.8])
    legend = plt.legend(frameon=False, loc='upper right', title='Tie Range in Phase 1', fontsize=20)
    legend.get_title().set_fontsize(fontsize=20)
    # Both branches below override the default ylim above; it only applies
    # to other interaction types.
    if interactions == 'Duration':
        plt.ylim([-0.5, 1.5])
        plt.subplots_adjust(left=0.205, bottom=0.11, right=0.98, top=0.97)
        plt.savefig('Plots/Manuscript/IncrementDuration.pdf', format='pdf')
    if interactions == 'Frequency':
        plt.ylim([-0.5, 1.5])
        plt.subplots_adjust(left=0.205, bottom=0.11, right=0.98, top=0.97)
        plt.savefig('Plots/Manuscript/IncrementFrequency.pdf', format='pdf')
    plt.show()
# Fig.~4
def TransitionTR(a, b):
    """Heatmap of tie-range transition probabilities from phase a to phase b (Fig. 4).

    Parameters
    ----------
    a, b : int
        Source and target phase numbers; select the input .npy file and the
        output PDF name.
    """
    Data = np.load('Results/trevolution/Frequency_Season_{}_{}.npy'.format(a, b))
    plt.figure(figsize=(7, 7))
    DS = []
    for data in Data:
        # Skip rows with missing values (-1) or sentinel tie ranges 100/101.
        if -1 in data or 101 in data[:2] or 100 in data[:2]:
            continue
        x = list(map(int, map(float, data[:4])))
        # Pool all tie ranges >= 6 into a single ">=6" bucket.
        x[0] = min(x[0], 6)
        x[1] = min(x[1], 6)
        DS.append(x)
    # A[i][j]: ties with range i+2 in phase a and range j+2 in phase b.
    # Column 4 pools ranges >= 6; column 5 counts ties with no phase-b
    # interaction (data[3] == 0) — presumably dissolved ties; TODO confirm.
    A = [[0 for _ in range(6)] for _ in range(5)]
    for data in DS:
        if data[2] > 0:
            if data[3] == 0:
                A[int(data[0]) - 2][5] += 1
            elif int(data[1]) >= 6:
                A[int(data[0]) - 2][4] += 1
            else:
                A[int(data[0]) - 2][int(data[1]) - 2] += 1
    # Row-normalise over the five surviving columns only; dissolved ties in
    # column 5 are excluded from the denominator.
    EA = [sum(A[i][:5]) for i in range(5)]
    NormA = np.array([[A[i][j] / EA[i] for j in range(5)] for i in range(5)])
    h = sns.heatmap(data=NormA, cmap='YlOrBr', annot=True, annot_kws={'size': 15}, cbar=False)
    cb = h.figure.colorbar(h.collections[0])
    cb.ax.tick_params(labelsize=20)
    plt.xticks([0.5, 1.5, 2.5, 3.5, 4.5], [r'$2$', r'$3$', r'$4$', r'$5$', r'$\geq6$'], fontsize=20)
    plt.yticks([0.5, 1.5, 2.5, 3.5, 4.5], [r'$2$', r'$3$', r'$4$', r'$5$', r'$\geq6$'], fontsize=20)
    plt.xlabel('Tie Range in Phase %d' % b, fontsize=25)
    plt.ylabel('Tie Range in Phase %d' % a, fontsize=25)
    plt.subplots_adjust(left=0.12, bottom=0.11, right=1, top=0.98)
    plt.savefig('Plots/Manuscript/Transition_P{}_P{}.pdf'.format(a, b), format='pdf')
# Fig.~5,S8,
def InteractionTR(interactions, a, b):
    """Heatmap of mean log-interaction in phase b, by tie range in phases a and b
    (Fig. 5, S8).

    Parameters
    ----------
    interactions : str
        'Duration' or 'Frequency'; selects the input file and labels.
    a, b : int
        Source and target phase numbers.
    """
    Data = np.load('Results/trevolution/{}_Season_{}_{}.npy'.format(interactions, a, b))
    plt.figure(figsize=(7, 7))
    DS = []
    for data in Data:
        # Skip rows with missing values (-1) or sentinel tie ranges 100/101.
        if -1 in data or 101 in data[:2] or 100 in data[:2]:
            continue
        x = list(map(int, map(float, data)))
        # Pool all tie ranges >= 6 into a single ">=6" bucket.
        x[0] = min(x[0], 6)
        x[1] = min(x[1], 6)
        DS.append(x)
    # One row per qualifying tie: (range in a, range in b, log interaction in b).
    All = []
    for i in range(5):
        for j in range(5):
            for data in DS:
                if data[2] != 0 and data[0] == i + 2 and data[1] == j + 2:
                    All.append([i + 2, j + 2, mt.log(data[3] + 1)])
    df = pd.DataFrame(All, columns=['Tie Range in Phase a', 'Tie Range in Phase b', '$Ln({})$ in Phase b'.format(interactions)])
    # NOTE(review): reshape(5, 5) assumes every (range_a, range_b) cell is
    # populated — confirm the data guarantees this.
    A = df.groupby(['Tie Range in Phase a', 'Tie Range in Phase b'])['$Ln({})$ in Phase b'.format(interactions)].mean()
    NormA = np.array(list(A)).reshape(5, 5)
    h = sns.heatmap(data=NormA, cmap='YlOrBr', annot=True, annot_kws={'size': 15}, cbar=False)
    cb = h.figure.colorbar(h.collections[0])
    cb.ax.tick_params(labelsize=20)
    plt.xticks([0.5, 1.5, 2.5, 3.5, 4.5], [r'$2$', r'$3$', r'$4$', r'$5$', r'$\geq6$'], fontsize=20)
    plt.yticks([0.5, 1.5, 2.5, 3.5, 4.5], [r'$2$', r'$3$', r'$4$', r'$5$', r'$\geq6$'], fontsize=20)
    plt.xlabel('Tie Range in Phase {}'.format(b), fontsize=25)
    plt.ylabel('Tie Range in Phase {}'.format(a), fontsize=25)
    plt.title('$log$({}) in Phase {}'.format(interactions, b), fontsize=30)
    plt.subplots_adjust(left=0.12, bottom=0.11, right=1, top=0.93)
    # Main-text figures for early phases; SI figures otherwise.
    if b <= 2:
        plt.savefig('Plots/Manuscript/Evolution_P{}_P{}_{}.pdf'.format(a, b, interactions), format='pdf')
    if b > 2:
        plt.savefig('Plots/SI/Evolution_P{}_P{}_{}.pdf'.format(a, b, interactions), format='pdf')
# Fig.~5,S8,
def PPTR(a, b):
    """Heatmap of tie persistence probability from phase a to phase b,
    by tie range in both phases (Fig. 5, S8).

    Parameters
    ----------
    a, b : int
        Source and target phase numbers.
    """
    Data = np.load('Results/trevolution/Frequency_Season_%d_%d.npy' % (a, b))
    plt.figure(figsize=(7, 7))
    DS = []
    for data in Data:
        # Skip rows with missing values (-1) or sentinel tie ranges 100/101.
        if -1 in data or 101 in data[:2] or 100 in data[:2]:
            continue
        x = list(map(int, map(float, data[:4])))
        # Pool all tie ranges >= 6 into a single ">=6" bucket.
        if x[0] > 6:
            x[0] = 6
        if x[1] > 6:
            x[1] = 6
        DS.append(x)
    # B counts every tie active in phase a, A only those still active in
    # phase b; both bucketed by (range in a, range in b).
    A = [[0 for j in range(5)] for i in range(5)]
    B = [[0 for j in range(5)] for i in range(5)]
    for data in DS:
        if data[2] > 0:
            if data[3] != 0:
                A[int(data[0]) - 2][int(data[1]) - 2] += 1
            B[int(data[0]) - 2][int(data[1]) - 2] += 1
    # NOTE(review): raises ZeroDivisionError if any bucket is empty — confirm
    # the data guarantees every (range_a, range_b) cell is populated.
    NormA = np.array([[A[i][j] / B[i][j] for j in range(5)] for i in range(5)])
    h = sns.heatmap(data=NormA, cmap='YlOrBr', annot=True, annot_kws={'size': 15}, cbar=False)
    cb = h.figure.colorbar(h.collections[0])
    cb.ax.tick_params(labelsize=20)
    plt.xticks([0.5, 1.5, 2.5, 3.5, 4.5], [r'$2$', r'$3$', r'$4$', r'$5$', r'$\geq6$'], fontsize=20)
    plt.yticks([0.5, 1.5, 2.5, 3.5, 4.5], [r'$2$', r'$3$', r'$4$', r'$5$', r'$\geq6$'], fontsize=20)
    plt.xlabel('Tie Range in Phase %d' % b, fontsize=25)
    plt.ylabel('Tie Range in Phase %d' % a, fontsize=25)
    plt.title('Persistence Probability', fontsize=30)
    plt.subplots_adjust(left=0.12, bottom=0.11, right=1, top=0.93)
    # Main-text figure for early phases; SI figure otherwise.
    if b <= 2:
        plt.savefig('Plots/Manuscript/PersistenceProbability_P{}_P{}.pdf'.format(a, b), format='pdf')
    else:
        plt.savefig('Plots/SI/PersistenceProbability_P{}_P{}.pdf'.format(a, b), format='pdf')
# Fig.~6
def old_new(interactions):
    """Compare phase-2 interaction levels of new vs existing ties, by phase-2
    tie range (Fig. 6).

    Bug fix: the original version re-scanned DS once per outer loop index, so
    each bucket accumulated up to five duplicate copies of its samples. The
    means were unaffected (duplicates do not change a mean), but
    mean_confidence_interval saw an inflated sample count and under-estimated
    the error bars for later buckets. Each bucket is now filled exactly once.

    Parameters
    ----------
    interactions : str
        Either 'Duration' or 'Frequency'; selects the result file, the y-axis
        limits and the output PDF name.
    """
    fig = plt.figure(figsize=(7.2, 7.2))
    ax = plt.axes()
    Data = read('Results/{}_season_1_2.txt'.format(interactions))
    DS = []
    for data in Data:
        # Skip rows with missing values (-1) or sentinel tie ranges 100/101.
        if '-1' in data or '101' in data[:2] or '100' in data[:2]:
            continue
        x = list(map(int, map(float, data)))
        # Pool all tie ranges >= 6 into a single ">=6" bucket.
        if x[0] > 6:
            x[0] = 6
        if x[1] > 6:
            x[1] = 6
        DS.append(x)
    X = [2, 3, 4, 5, 6]
    # NOTE(review): the selection conditions are asymmetric — row[2] == 0 for
    # 'new' ties vs row[3] != 0 for 'existing' — confirm the column semantics.
    # Series 1: ties new in phase 2 (no phase-1 interaction recorded).
    H = [[] for _ in range(5)]
    for row in DS:
        if row[2] == 0:
            H[row[1] - 2].append(mt.log(row[3] + 1))
    Y = [np.mean(H[i]) for i in range(5)]
    Err = [mean_confidence_interval(H[i]) for i in range(5)]
    plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color="#34495e", ls='--', label='New in Phase 2',
                 ecolor='#34495e', marker='d', markersize=10, linewidth=2, elinewidth=0.5, capsize=4)
    # Series 2: ties already existing in phase 1.
    H = [[] for _ in range(5)]
    for row in DS:
        if row[3] != 0:
            H[row[1] - 2].append(mt.log(row[3] + 1))
    Y = [np.mean(H[i]) for i in range(5)]
    Err = [mean_confidence_interval(H[i]) for i in range(5)]
    plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color="#2980b9", ls='-', label='Existing in Phase 1',
                 ecolor='#2980b9', marker='o', markersize=10, linewidth=2, elinewidth=0.5, capsize=4)
    plt.xlabel('Tie Range in Phase 2', fontsize=25)
    plt.ylabel('$log$(Interaction {}) in Phase 2'.format(interactions), fontsize=25)
    plt.xticks([2, 3, 4, 5, 6], ['2', '3', '4', '5', r'$\geq6$'], fontsize=20)
    plt.yticks(fontsize=20)
    if interactions == 'Frequency':
        plt.ylim([0.8, 2])
    if interactions == 'Duration':
        plt.ylim([3, 6])
    legend = plt.legend(frameon=False, loc='upper right', title='Tie Status', fontsize=20)
    legend.get_title().set_fontsize(fontsize=20)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.subplots_adjust(left=0.145, bottom=0.11, right=0.98, top=0.97)
    plt.savefig('Plots/Manuscript/ExistingvsNew_{}.pdf'.format(interactions), format='pdf')
| 41.708835
| 153
| 0.530307
| 3,237
| 20,771
| 3.358665
| 0.075687
| 0.04507
| 0.018764
| 0.0344
| 0.847866
| 0.822756
| 0.787252
| 0.763797
| 0.758922
| 0.702539
| 0
| 0.114025
| 0.261952
| 20,771
| 497
| 154
| 41.792757
| 0.595173
| 0.016321
| 0
| 0.746637
| 0
| 0
| 0.097648
| 0.037237
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015695
| false
| 0
| 0.002242
| 0
| 0.017937
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
894f1df185aa660a1cdf6ae635bfedec9cc18326
| 6,086
|
py
|
Python
|
tests/unit/test_profiles.py
|
nordic-institute/X-Road-Security-Server-toolkit
|
1538dbf3d76647f4fb3a72bbe93bf54f414ee9fb
|
[
"MIT"
] | 7
|
2020-11-01T19:50:11.000Z
|
2022-01-18T17:45:19.000Z
|
tests/unit/test_profiles.py
|
nordic-institute/X-Road-Security-Server-toolkit
|
1538dbf3d76647f4fb3a72bbe93bf54f414ee9fb
|
[
"MIT"
] | 24
|
2020-11-09T08:09:10.000Z
|
2021-06-16T07:22:14.000Z
|
tests/unit/test_profiles.py
|
nordic-institute/X-Road-Security-Server-toolkit
|
1538dbf3d76647f4fb3a72bbe93bf54f414ee9fb
|
[
"MIT"
] | 1
|
2021-04-27T14:39:48.000Z
|
2021-04-27T14:39:48.000Z
|
import unittest
from xrdsst.core.profile.profile_data import ProfileData
from xrdsst.core.profile.profile_types_enum import ProfileTypesEnum
from xrdsst.core.profile.certificate_types_enum import CertificateTypesEnum
from xrdsst.core.profile.profile_factory import ProfileFactory
import pytest
class TestProfiles(unittest.TestCase):
    """Unit tests for the certificate DN profiles produced by ProfileFactory."""

    # Shared, read-only fixture reused by every test.
    profile_data = ProfileData(
        instance_identifier="DEV",
        member_class="COM",
        member_code="12345",
        security_server_dns="ssX_dns",
        security_server_code="ssX_code",
        owner_code="111",
        owner_class="ORG",
        member_name="NIIS"
    )

    @pytest.fixture(autouse=True)
    def capsys(self, capsys):
        # Expose pytest's capture fixture on the instance.
        self.capsys = capsys

    def _build(self, cert_type, profile_type):
        """Build and return the DN dict for the given type pair."""
        builder = ProfileFactory().get_profile_builder(
            certificate_type=cert_type, profile_type=profile_type)
        return builder.build_profile(profile_data=self.profile_data)

    def test_profile_data_construct(self):
        data = self.profile_data
        assert data.serial_number_auth == "DEV/ssX_code/ORG"
        assert data.serial_number_sign == "DEV/ssX_code/COM"
        assert data.security_server_id == "/ORG/111/ssX_code"

    def test_ejbca_auth_certificate_profile(self):
        result = self._build(CertificateTypesEnum.AUTH, ProfileTypesEnum.EJBCA)
        assert result == {
            "C": self.profile_data.instance_identifier,
            "CN": self.profile_data.security_server_code,
        }

    def test_ejbca_sign_certificate_profile(self):
        result = self._build(CertificateTypesEnum.SIGN, ProfileTypesEnum.EJBCA)
        assert result == {
            "C": self.profile_data.instance_identifier,
            "O": self.profile_data.member_class,
            "CN": self.profile_data.member_code,
        }

    def test_fivrk_auth_certificate_profile(self):
        result = self._build(CertificateTypesEnum.AUTH, ProfileTypesEnum.FI)
        assert result == {
            "C": "FI",
            "O": self.profile_data.member_name,
            "serialNumber": self.profile_data.serial_number_auth,
            "CN": self.profile_data.security_server_dns,
        }

    def test_fivrk_sign_certificate_profile(self):
        result = self._build(CertificateTypesEnum.SIGN, ProfileTypesEnum.FI)
        assert result == {
            "C": "FI",
            "O": self.profile_data.member_name,
            "serialNumber": self.profile_data.serial_number_sign,
            "CN": self.profile_data.member_code,
        }

    def test_fo_auth_certificate_profile(self):
        result = self._build(CertificateTypesEnum.AUTH, ProfileTypesEnum.FO)
        assert result == {
            "C": "FO",
            "O": self.profile_data.instance_identifier,
            "CN": self.profile_data.security_server_id,
        }

    def test_fo_sign_certificate_profile(self):
        result = self._build(CertificateTypesEnum.SIGN, ProfileTypesEnum.FO)
        assert result == {
            "C": "FO",
            "O": self.profile_data.instance_identifier,
            "OU": self.profile_data.member_class,
            "CN": self.profile_data.member_code,
            "serialNumber": self.profile_data.security_server_id,
        }

    def test_is_auth_certificate_profile(self):
        result = self._build(CertificateTypesEnum.AUTH, ProfileTypesEnum.IS)
        assert result == {
            "C": "IS",
            "CN": self.profile_data.security_server_dns,
            "serialNumber": self.profile_data.security_server_id,
        }

    def test_is_sign_certificate_profile(self):
        result = self._build(CertificateTypesEnum.SIGN, ProfileTypesEnum.IS)
        assert result == {
            "C": "IS",
            "O": self.profile_data.instance_identifier,
            "OU": self.profile_data.member_class,
            "CN": self.profile_data.member_code,
            "serialNumber": self.profile_data.security_server_id,
        }

    def test_default_auth_certificate_profile(self):
        # profile_type=None must fall back to the FI-VRK profile.
        result = self._build(CertificateTypesEnum.AUTH, None)
        assert result == {
            "C": "FI",
            "O": self.profile_data.member_name,
            "serialNumber": self.profile_data.serial_number_auth,
            "CN": self.profile_data.security_server_dns,
        }

    def test_default_sign_certificate_profile(self):
        # profile_type=None must fall back to the FI-VRK profile.
        result = self._build(CertificateTypesEnum.SIGN, None)
        assert result == {
            "C": "FI",
            "O": self.profile_data.member_name,
            "serialNumber": self.profile_data.serial_number_sign,
            "CN": self.profile_data.member_code,
        }
| 47.92126
| 135
| 0.725107
| 727
| 6,086
| 5.775791
| 0.097662
| 0.144082
| 0.150036
| 0.060014
| 0.867588
| 0.847583
| 0.821624
| 0.821386
| 0.819243
| 0.808288
| 0
| 0.00417
| 0.172527
| 6,086
| 126
| 136
| 48.301587
| 0.829627
| 0
| 0
| 0.524752
| 0
| 0
| 0.037305
| 0
| 0
| 0
| 0
| 0
| 0.49505
| 1
| 0.118812
| false
| 0
| 0.059406
| 0
| 0.19802
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
89745c805620b9a97bf8d70fce57d3053bbc657e
| 93
|
py
|
Python
|
tests/test_A001221.py
|
Olyno/oeis
|
300446e7735a90af10f14353366326bfcff33ae4
|
[
"MIT"
] | null | null | null |
tests/test_A001221.py
|
Olyno/oeis
|
300446e7735a90af10f14353366326bfcff33ae4
|
[
"MIT"
] | null | null | null |
tests/test_A001221.py
|
Olyno/oeis
|
300446e7735a90af10f14353366326bfcff33ae4
|
[
"MIT"
] | null | null | null |
from oeis import A001221
def test_A001221():
    """Spot-check the first six terms of OEIS A001221 (number of distinct prime factors)."""
    expected = [0, 1, 1, 1, 1, 2]
    assert A001221[0:6] == expected
| 15.5
| 45
| 0.612903
| 17
| 93
| 3.294118
| 0.647059
| 0.107143
| 0.107143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.361111
| 0.225806
| 93
| 5
| 46
| 18.6
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
899c2fb11cc2e1ce42d66b5d638fd1584bc57975
| 2,225
|
py
|
Python
|
orders/migrations/0023_auto_20200120_1608.py
|
yun-mh/uniwalk
|
f5307f6970b24736d13b56b4792c580398c35b3a
|
[
"Apache-2.0"
] | null | null | null |
orders/migrations/0023_auto_20200120_1608.py
|
yun-mh/uniwalk
|
f5307f6970b24736d13b56b4792c580398c35b3a
|
[
"Apache-2.0"
] | 9
|
2020-01-10T14:10:02.000Z
|
2022-03-12T00:08:19.000Z
|
orders/migrations/0023_auto_20200120_1608.py
|
yun-mh/uniwalk
|
f5307f6970b24736d13b56b4792c580398c35b3a
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.5 on 2020-01-20 07:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: updates the Japanese verbose_name of every
    per-side (left/right) material CharField on the orderitem model.

    NOTE: generated code — kept byte-stable apart from comments so Django's
    migration history and future autodetection stay consistent.
    """

    dependencies = [
        ('orders', '0022_auto_20200120_1554'),
    ]

    # One AlterField per material field; the pattern is identical for each:
    # CharField(max_length=20) with a side-specific Japanese display label.
    operations = [
        migrations.AlterField(
            model_name='orderitem',
            name='midsole_material_left',
            field=models.CharField(max_length=20, verbose_name='ミッドソール素材(左)'),
        ),
        migrations.AlterField(
            model_name='orderitem',
            name='midsole_material_right',
            field=models.CharField(max_length=20, verbose_name='ミッドソール素材(右)'),
        ),
        migrations.AlterField(
            model_name='orderitem',
            name='outsole_material_left',
            field=models.CharField(max_length=20, verbose_name='アウトソール素材(左)'),
        ),
        migrations.AlterField(
            model_name='orderitem',
            name='outsole_material_right',
            field=models.CharField(max_length=20, verbose_name='アウトソール素材(右)'),
        ),
        migrations.AlterField(
            model_name='orderitem',
            name='shoelace_material_left',
            field=models.CharField(max_length=20, verbose_name='シューレース素材(左)'),
        ),
        migrations.AlterField(
            model_name='orderitem',
            name='shoelace_material_right',
            field=models.CharField(max_length=20, verbose_name='シューレース素材(右)'),
        ),
        migrations.AlterField(
            model_name='orderitem',
            name='tongue_material_left',
            field=models.CharField(max_length=20, verbose_name='タン素材(左)'),
        ),
        migrations.AlterField(
            model_name='orderitem',
            name='tongue_material_right',
            field=models.CharField(max_length=20, verbose_name='タン素材(右)'),
        ),
        migrations.AlterField(
            model_name='orderitem',
            name='uppersole_material_left',
            field=models.CharField(max_length=20, verbose_name='アッパーソール素材(左)'),
        ),
        migrations.AlterField(
            model_name='orderitem',
            name='uppersole_material_right',
            field=models.CharField(max_length=20, verbose_name='アッパーソール素材(右)'),
        ),
    ]
| 34.765625
| 79
| 0.595506
| 219
| 2,225
| 5.808219
| 0.223744
| 0.157233
| 0.196541
| 0.227987
| 0.882075
| 0.882075
| 0.882075
| 0.875
| 0.486635
| 0.428459
| 0
| 0.032055
| 0.284944
| 2,225
| 63
| 80
| 35.31746
| 0.767442
| 0.020225
| 0
| 0.526316
| 1
| 0
| 0.202938
| 0.101928
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.017544
| 0
| 0.070175
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
89b4e78819ae4327328461dfc804a2e885d7917b
| 7,547
|
py
|
Python
|
mineable_token_abis.py
|
JoaoCampos89/0xbtc-discord-price-bot
|
6eb6839213dfb0176c0be72c2dda7193d4131750
|
[
"MIT"
] | 10
|
2018-05-01T14:54:55.000Z
|
2022-01-24T09:37:31.000Z
|
mineable_token_abis.py
|
JoaoCampos89/0xbtc-discord-price-bot
|
6eb6839213dfb0176c0be72c2dda7193d4131750
|
[
"MIT"
] | 1
|
2019-04-10T16:44:52.000Z
|
2019-04-10T16:44:52.000Z
|
mineable_token_abis.py
|
JoaoCampos89/0xbtc-discord-price-bot
|
6eb6839213dfb0176c0be72c2dda7193d4131750
|
[
"MIT"
] | 16
|
2018-06-06T21:44:18.000Z
|
2021-09-22T09:35:59.000Z
|
abis ={
'0xBTC' : '[{"constant":true,"inputs":[],"name":"name","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"spender","type":"address"},{"name":"tokens","type":"uint256"}],"name":"approve","outputs":[{"name":"success","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"lastRewardEthBlockNumber","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getMiningDifficulty","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"nonce","type":"uint256"},{"name":"challenge_digest","type":"bytes32"}],"name":"mint","outputs":[{"name":"success","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"totalSupply","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"from","type":"address"},{"name":"to","type":"address"},{"name":"tokens","type":"uint256"}],"name":"transferFrom","outputs":[{"name":"success","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"rewardEra","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"decimals","outputs":[{"name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getMiningTarget","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"_totalSupply","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getMiningR
eward","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getChallengeNumber","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"maxSupplyForEra","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"tokensMinted","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"lastRewardTo","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"tokenOwner","type":"address"}],"name":"balanceOf","outputs":[{"name":"balance","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[],"name":"acceptOwnership","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"nonce","type":"uint256"},{"name":"challenge_digest","type":"bytes32"},{"name":"challenge_number","type":"bytes32"},{"name":"testTarget","type":"uint256"}],"name":"checkMintSolution","outputs":[{"name":"success","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"epochCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"_MAXIMUM_TARGET","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"miningTarget","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"challengeNumber","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":
true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"symbol","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"nonce","type":"uint256"},{"name":"challenge_digest","type":"bytes32"},{"name":"challenge_number","type":"bytes32"}],"name":"getMintDigest","outputs":[{"name":"digesttest","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"to","type":"address"},{"name":"tokens","type":"uint256"}],"name":"transfer","outputs":[{"name":"success","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"_BLOCKS_PER_READJUSTMENT","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"lastRewardAmount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"spender","type":"address"},{"name":"tokens","type":"uint256"},{"name":"data","type":"bytes"}],"name":"approveAndCall","outputs":[{"name":"success","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"latestDifficultyPeriodStarted","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"newOwner","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"tokenAddress","type":"address"},{"name":"tokens","type":"uint256"}],"name":"transferAnyERC20Token","outputs":[{"name":"success","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"_MINIMUM_TARGET","outputs
":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"tokenOwner","type":"address"},{"name":"spender","type":"address"}],"name":"allowance","outputs":[{"name":"remaining","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_newOwner","type":"address"}],"name":"transferOwnership","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"inputs":[],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"payable":true,"stateMutability":"payable","type":"fallback"},{"anonymous":false,"inputs":[{"indexed":true,"name":"from","type":"address"},{"indexed":false,"name":"reward_amount","type":"uint256"},{"indexed":false,"name":"epochCount","type":"uint256"},{"indexed":false,"name":"newChallengeNumber","type":"bytes32"}],"name":"Mint","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"_from","type":"address"},{"indexed":true,"name":"_to","type":"address"}],"name":"OwnershipTransferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"from","type":"address"},{"indexed":true,"name":"to","type":"address"},{"indexed":false,"name":"tokens","type":"uint256"}],"name":"Transfer","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"tokenOwner","type":"address"},{"indexed":true,"name":"spender","type":"address"},{"indexed":false,"name":"tokens","type":"uint256"}],"name":"Approval","type":"event"}]',
}
| 1,509.4
| 7,534
| 0.65642
| 769
| 7,547
| 6.420026
| 0.109233
| 0.089933
| 0.20235
| 0.124772
| 0.837553
| 0.805955
| 0.802714
| 0.782662
| 0.768483
| 0.732834
| 0
| 0.015262
| 0.00159
| 7,547
| 4
| 7,535
| 1,886.75
| 0.639947
| 0
| 0
| 0
| 0
| 0.333333
| 0.99682
| 0.996157
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
986489d2c4411e24fa2a9920e92a604671584311
| 88
|
py
|
Python
|
app/users/main/__init__.py
|
trinanda/app-store
|
a2ad176312c548b065f0e0acce4a51a10c96fb8f
|
[
"MIT"
] | null | null | null |
app/users/main/__init__.py
|
trinanda/app-store
|
a2ad176312c548b065f0e0acce4a51a10c96fb8f
|
[
"MIT"
] | null | null | null |
app/users/main/__init__.py
|
trinanda/app-store
|
a2ad176312c548b065f0e0acce4a51a10c96fb8f
|
[
"MIT"
] | null | null | null |
from app.users.main import errors # noqa
from app.users.main.views import main # noqa
| 29.333333
| 45
| 0.761364
| 15
| 88
| 4.466667
| 0.533333
| 0.208955
| 0.358209
| 0.477612
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159091
| 88
| 2
| 46
| 44
| 0.905405
| 0.102273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
988dbc521178901494305c4a8a8cdb51cbdff977
| 143,996
|
py
|
Python
|
TweakApi/apis/portal_template_folder_api.py
|
tweak-com-public/tweak-api-client-python
|
019f86da11fdb12683d516f8f37db5d717380bcc
|
[
"Apache-2.0"
] | null | null | null |
TweakApi/apis/portal_template_folder_api.py
|
tweak-com-public/tweak-api-client-python
|
019f86da11fdb12683d516f8f37db5d717380bcc
|
[
"Apache-2.0"
] | null | null | null |
TweakApi/apis/portal_template_folder_api.py
|
tweak-com-public/tweak-api-client-python
|
019f86da11fdb12683d516f8f37db5d717380bcc
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
tweak-api
Tweak API to integrate with all the Tweak services. You can find out more about Tweak at <a href='https://www.tweak.com'>https://www.tweak.com</a>, #tweak.
OpenAPI spec version: 1.0.8-beta.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class PortalTemplateFolderApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """
    Build the API wrapper.

    :param api_client: explicit ApiClient to use; when omitted, the shared
        client held by the global Configuration is used (created on demand).
    """
    config = Configuration()
    if not api_client:
        # No explicit client given: lazily create and reuse the shared one.
        if not config.api_client:
            config.api_client = ApiClient()
        api_client = config.api_client
    self.api_client = api_client
def portal_template_folders_change_stream_get(self, **kwargs):
    """
    Create a change stream.

    Synchronous by default; supplying a `callback` keyword makes the call
    asynchronous and returns the request thread instead of the data.

    >>> thread = api.portal_template_folders_change_stream_get(callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str options:
    :return: file
        If the method is called asynchronously, returns the request thread.
    """
    # Callers of this wrapper want only the deserialized body, not the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.portal_template_folders_change_stream_get_with_http_info(**kwargs)
    data = self.portal_template_folders_change_stream_get_with_http_info(**kwargs)
    return data
def portal_template_folders_change_stream_get_with_http_info(self, **kwargs):
    """
    Create a change stream.

    Builds and dispatches the HTTP request. Synchronous by default;
    supplying a `callback` keyword makes the call asynchronous and the
    method returns the request thread.

    >>> thread = api.portal_template_folders_change_stream_get_with_http_info(callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str options:
    :return: file
        If the method is called asynchronously, returns the request thread.
    """
    all_params = ['options', 'callback', '_return_http_data_only']
    params = locals()
    # Reject keyword arguments the endpoint does not understand.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_change_stream_get" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}
    resource_path = '/PortalTemplateFolders/change-stream'.replace('{format}', 'json')

    path_params = {}
    query_params = {}
    if 'options' in params:
        query_params['options'] = params['options']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # Content negotiation: only send Accept when a usable value was chosen.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])

    # Authentication setting
    auth_settings = ['access_token']

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='file',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_change_stream_post(self, **kwargs):
    """
    Create a change stream.

    Synchronous by default; supplying a `callback` keyword makes the call
    asynchronous and returns the request thread instead of the data.

    >>> thread = api.portal_template_folders_change_stream_post(callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str options:
    :return: file
        If the method is called asynchronously, returns the request thread.
    """
    # Strip the HTTP metadata: return only the deserialized body.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.portal_template_folders_change_stream_post_with_http_info(**kwargs)
    data = self.portal_template_folders_change_stream_post_with_http_info(**kwargs)
    return data
def portal_template_folders_change_stream_post_with_http_info(self, **kwargs):
    """
    Create a change stream.

    Builds and dispatches the HTTP POST request; `options` is sent as a
    form field. Synchronous by default; supplying a `callback` keyword
    makes the call asynchronous and returns the request thread.

    >>> thread = api.portal_template_folders_change_stream_post_with_http_info(callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str options:
    :return: file
        If the method is called asynchronously, returns the request thread.
    """
    all_params = ['options', 'callback', '_return_http_data_only']
    params = locals()
    # Reject keyword arguments the endpoint does not understand.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_change_stream_post" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}
    resource_path = '/PortalTemplateFolders/change-stream'.replace('{format}', 'json')

    path_params = {}
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    if 'options' in params:
        form_params.append(('options', params['options']))
    body_params = None

    # Content negotiation: only send Accept when a usable value was chosen.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])

    # Authentication setting
    auth_settings = ['access_token']

    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='file',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_count_get(self, **kwargs):
    """
    Count instances of the model matched by where from the data source.

    Synchronous by default; supplying a `callback` keyword makes the call
    asynchronous and returns the request thread instead of the data.

    >>> thread = api.portal_template_folders_count_get(callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str where: Criteria to match model instances
    :return: InlineResponse2001
        If the method is called asynchronously, returns the request thread.
    """
    # Strip the HTTP metadata: return only the deserialized body.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.portal_template_folders_count_get_with_http_info(**kwargs)
    data = self.portal_template_folders_count_get_with_http_info(**kwargs)
    return data
def portal_template_folders_count_get_with_http_info(self, **kwargs):
    """
    Count instances of the model matched by where from the data source.

    Builds and dispatches the HTTP request; `where` is sent as a query
    parameter. Synchronous by default; supplying a `callback` keyword
    makes the call asynchronous and returns the request thread.

    >>> thread = api.portal_template_folders_count_get_with_http_info(callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str where: Criteria to match model instances
    :return: InlineResponse2001
        If the method is called asynchronously, returns the request thread.
    """
    all_params = ['where', 'callback', '_return_http_data_only']
    params = locals()
    # Reject keyword arguments the endpoint does not understand.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_count_get" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}
    resource_path = '/PortalTemplateFolders/count'.replace('{format}', 'json')

    path_params = {}
    query_params = {}
    if 'where' in params:
        query_params['where'] = params['where']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # Content negotiation: only send Accept when a usable value was chosen.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])

    # Authentication setting
    auth_settings = ['access_token']

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='InlineResponse2001',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_find_one_get(self, **kwargs):
    """
    Find first instance of the model matched by filter from the data source.

    Synchronous by default; supplying a `callback` keyword makes the call
    asynchronous and returns the request thread instead of the data.

    >>> thread = api.portal_template_folders_find_one_get(callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str filter: Filter defining fields, where, include, order, offset, and limit - must be a JSON-encoded string ({\"something\":\"value\"})
    :return: PortalTemplateFolder
        If the method is called asynchronously, returns the request thread.
    """
    # Strip the HTTP metadata: return only the deserialized body.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.portal_template_folders_find_one_get_with_http_info(**kwargs)
    data = self.portal_template_folders_find_one_get_with_http_info(**kwargs)
    return data
def portal_template_folders_find_one_get_with_http_info(self, **kwargs):
    """
    Find first instance of the model matched by filter from the data source.

    Builds and dispatches the HTTP request; `filter` is sent as a query
    parameter. Synchronous by default; supplying a `callback` keyword
    makes the call asynchronous and returns the request thread.

    >>> thread = api.portal_template_folders_find_one_get_with_http_info(callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str filter: Filter defining fields, where, include, order, offset, and limit - must be a JSON-encoded string ({\"something\":\"value\"})
    :return: PortalTemplateFolder
        If the method is called asynchronously, returns the request thread.
    """
    all_params = ['filter', 'callback', '_return_http_data_only']
    params = locals()
    # Reject keyword arguments the endpoint does not understand.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_find_one_get" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}
    resource_path = '/PortalTemplateFolders/findOne'.replace('{format}', 'json')

    path_params = {}
    query_params = {}
    if 'filter' in params:
        query_params['filter'] = params['filter']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # Content negotiation: only send Accept when a usable value was chosen.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])

    # Authentication setting
    auth_settings = ['access_token']

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='PortalTemplateFolder',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_get(self, **kwargs):
    """
    Find all instances of the model matched by filter from the data source.

    Synchronous by default; supplying a `callback` keyword makes the call
    asynchronous and returns the request thread instead of the data.

    >>> thread = api.portal_template_folders_get(callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str filter: Filter defining fields, where, include, order, offset, and limit - must be a JSON-encoded string ({\"something\":\"value\"})
    :return: list[PortalTemplateFolder]
        If the method is called asynchronously, returns the request thread.
    """
    # Strip the HTTP metadata: return only the deserialized body.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.portal_template_folders_get_with_http_info(**kwargs)
    data = self.portal_template_folders_get_with_http_info(**kwargs)
    return data
def portal_template_folders_get_with_http_info(self, **kwargs):
    """
    Find all instances of the model matched by filter from the data source.

    Builds and dispatches the HTTP request; `filter` is sent as a query
    parameter. Synchronous by default; supplying a `callback` keyword
    makes the call asynchronous and returns the request thread.

    >>> thread = api.portal_template_folders_get_with_http_info(callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str filter: Filter defining fields, where, include, order, offset, and limit - must be a JSON-encoded string ({\"something\":\"value\"})
    :return: list[PortalTemplateFolder]
        If the method is called asynchronously, returns the request thread.
    """
    all_params = ['filter', 'callback', '_return_http_data_only']
    params = locals()
    # Reject keyword arguments the endpoint does not understand.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_get" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}
    resource_path = '/PortalTemplateFolders'.replace('{format}', 'json')

    path_params = {}
    query_params = {}
    if 'filter' in params:
        query_params['filter'] = params['filter']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # Content negotiation: only send Accept when a usable value was chosen.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])

    # Authentication setting
    auth_settings = ['access_token']

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='list[PortalTemplateFolder]',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_id_children_count_get(self, id, **kwargs):
    """
    Counts children of PortalTemplateFolder.

    Synchronous by default; supplying a `callback` keyword makes the call
    asynchronous and returns the request thread instead of the data.

    >>> thread = api.portal_template_folders_id_children_count_get(id, callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str id: PortalTemplateFolder id (required)
    :param str where: Criteria to match model instances
    :return: InlineResponse2001
        If the method is called asynchronously, returns the request thread.
    """
    # Strip the HTTP metadata: return only the deserialized body.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.portal_template_folders_id_children_count_get_with_http_info(id, **kwargs)
    data = self.portal_template_folders_id_children_count_get_with_http_info(id, **kwargs)
    return data
def portal_template_folders_id_children_count_get_with_http_info(self, id, **kwargs):
    """
    Counts children of PortalTemplateFolder.

    Builds and dispatches the HTTP request; `id` goes into the path and
    `where` into the query string. Synchronous by default; supplying a
    `callback` keyword makes the call asynchronous and returns the
    request thread.

    >>> thread = api.portal_template_folders_id_children_count_get_with_http_info(id, callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str id: PortalTemplateFolder id (required)
    :param str where: Criteria to match model instances
    :return: InlineResponse2001
        If the method is called asynchronously, returns the request thread.
    """
    all_params = ['id', 'where', 'callback', '_return_http_data_only']
    params = locals()
    # Reject keyword arguments the endpoint does not understand.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_id_children_count_get" % key
            )
        params[key] = val
    del params['kwargs']
    # 'id' is required and must not be None.
    if params.get('id') is None:
        raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_children_count_get`")

    collection_formats = {}
    resource_path = '/PortalTemplateFolders/{id}/children/count'.replace('{format}', 'json')

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    query_params = {}
    if 'where' in params:
        query_params['where'] = params['where']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # Content negotiation: only send Accept when a usable value was chosen.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])

    # Authentication setting
    auth_settings = ['access_token']

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='InlineResponse2001',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_id_children_fk_delete(self, id, fk, **kwargs):
    """
    Delete a related item by id for children.

    Synchronous by default; supplying a `callback` keyword makes the call
    asynchronous and returns the request thread instead of the data.

    >>> thread = api.portal_template_folders_id_children_fk_delete(id, fk, callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str id: PortalTemplateFolder id (required)
    :param str fk: Foreign key for children (required)
    :return: None
        If the method is called asynchronously, returns the request thread.
    """
    # Strip the HTTP metadata: return only the deserialized body.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.portal_template_folders_id_children_fk_delete_with_http_info(id, fk, **kwargs)
    data = self.portal_template_folders_id_children_fk_delete_with_http_info(id, fk, **kwargs)
    return data
def portal_template_folders_id_children_fk_delete_with_http_info(self, id, fk, **kwargs):
    """
    Delete a related item by id for children.

    Builds and dispatches the HTTP DELETE request; `id` and `fk` go into
    the path. Synchronous by default; supplying a `callback` keyword
    makes the call asynchronous and returns the request thread.

    >>> thread = api.portal_template_folders_id_children_fk_delete_with_http_info(id, fk, callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str id: PortalTemplateFolder id (required)
    :param str fk: Foreign key for children (required)
    :return: None
        If the method is called asynchronously, returns the request thread.
    """
    all_params = ['id', 'fk', 'callback', '_return_http_data_only']
    params = locals()
    # Reject keyword arguments the endpoint does not understand.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_id_children_fk_delete" % key
            )
        params[key] = val
    del params['kwargs']
    # 'id' and 'fk' are required and must not be None.
    if params.get('id') is None:
        raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_children_fk_delete`")
    if params.get('fk') is None:
        raise ValueError("Missing the required parameter `fk` when calling `portal_template_folders_id_children_fk_delete`")

    collection_formats = {}
    resource_path = '/PortalTemplateFolders/{id}/children/{fk}'.replace('{format}', 'json')

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    if 'fk' in params:
        path_params['fk'] = params['fk']
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # Content negotiation: only send Accept when a usable value was chosen.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])

    # Authentication setting
    auth_settings = ['access_token']

    return self.api_client.call_api(resource_path, 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type=None,
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_id_children_fk_get(self, id, fk, **kwargs):
    """
    Find a related item by id for children.

    Synchronous by default; supplying a `callback` keyword makes the call
    asynchronous and returns the request thread instead of the data.

    >>> thread = api.portal_template_folders_id_children_fk_get(id, fk, callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str id: PortalTemplateFolder id (required)
    :param str fk: Foreign key for children (required)
    :return: PortalTemplateFolder
        If the method is called asynchronously, returns the request thread.
    """
    # Strip the HTTP metadata: return only the deserialized body.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.portal_template_folders_id_children_fk_get_with_http_info(id, fk, **kwargs)
    data = self.portal_template_folders_id_children_fk_get_with_http_info(id, fk, **kwargs)
    return data
def portal_template_folders_id_children_fk_get_with_http_info(self, id, fk, **kwargs):
    """Find a related item by id for children.

    Low-level variant of ``portal_template_folders_id_children_fk_get``;
    performs the HTTP call itself and honours the ``callback`` and
    ``_return_http_data_only`` keywords.

    :param str id: PortalTemplateFolder id (required)
    :param str fk: Foreign key for children (required)
    :param callback function: callback for an asynchronous request (optional)
    :return: PortalTemplateFolder, or the request thread when asynchronous
    """
    # Reject keywords this endpoint does not understand.
    allowed = {'id', 'fk', 'callback', '_return_http_data_only'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_id_children_fk_get" % key
            )
    params = dict(kwargs)
    params['id'] = id
    params['fk'] = fk
    # Both path parameters are mandatory.
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_children_fk_get`")
    if params['fk'] is None:
        raise ValueError("Missing the required parameter `fk` when calling `portal_template_folders_id_children_fk_get`")

    collection_formats = {}
    resource_path = '/PortalTemplateFolders/{id}/children/{fk}'.replace('{format}', 'json')
    path_params = {'id': params['id'], 'fk': params['fk']}
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # Content negotiation: only emit an Accept header when the client
    # actually selects one.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/xml', 'text/xml',
         'application/javascript', 'text/javascript'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'application/x-www-form-urlencoded',
         'application/xml', 'text/xml'])

    # This endpoint is protected by the access_token scheme.
    auth_settings = ['access_token']
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='PortalTemplateFolder',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_id_children_fk_put(self, id, fk, **kwargs):
    """Update a related item by id for children.

    Synchronous by default; supply a `callback` keyword to make the
    request asynchronous, in which case the request thread is returned
    and the callback receives the response.

    :param str id: PortalTemplateFolder id (required)
    :param str fk: Foreign key for children (required)
    :param PortalTemplateFolder data: replacement attributes (optional)
    :param callback function: callback for an asynchronous request (optional)
    :return: PortalTemplateFolder, or the request thread when asynchronous
    """
    # Only the deserialized payload is wanted from the low-level call.
    kwargs['_return_http_data_only'] = True
    # Delegate verbatim; the helper returns either the data or the thread.
    return self.portal_template_folders_id_children_fk_put_with_http_info(
        id, fk, **kwargs)
def portal_template_folders_id_children_fk_put_with_http_info(self, id, fk, **kwargs):
    """Update a related item by id for children.

    Low-level variant of ``portal_template_folders_id_children_fk_put``;
    performs the HTTP call itself and honours the ``callback`` and
    ``_return_http_data_only`` keywords.

    :param str id: PortalTemplateFolder id (required)
    :param str fk: Foreign key for children (required)
    :param PortalTemplateFolder data: replacement attributes (optional)
    :param callback function: callback for an asynchronous request (optional)
    :return: PortalTemplateFolder, or the request thread when asynchronous
    """
    # Reject keywords this endpoint does not understand.
    allowed = {'id', 'fk', 'data', 'callback', '_return_http_data_only'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_id_children_fk_put" % key
            )
    params = dict(kwargs)
    params['id'] = id
    params['fk'] = fk
    # Both path parameters are mandatory.
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_children_fk_put`")
    if params['fk'] is None:
        raise ValueError("Missing the required parameter `fk` when calling `portal_template_folders_id_children_fk_put`")

    collection_formats = {}
    resource_path = '/PortalTemplateFolders/{id}/children/{fk}'.replace('{format}', 'json')
    path_params = {'id': params['id'], 'fk': params['fk']}
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    # The optional `data` keyword becomes the request body.
    body_params = params.get('data')

    # Content negotiation: only emit an Accept header when the client
    # actually selects one.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/xml', 'text/xml',
         'application/javascript', 'text/javascript'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'application/x-www-form-urlencoded',
         'application/xml', 'text/xml'])

    # This endpoint is protected by the access_token scheme.
    auth_settings = ['access_token']
    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='PortalTemplateFolder',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_id_children_get(self, id, **kwargs):
    """Queries children of PortalTemplateFolder.

    Synchronous by default; supply a `callback` keyword to make the
    request asynchronous, in which case the request thread is returned
    and the callback receives the response.

    :param str id: PortalTemplateFolder id (required)
    :param str filter: optional query filter (undocumented upstream)
    :param callback function: callback for an asynchronous request (optional)
    :return: list[PortalTemplateFolder], or the request thread when asynchronous
    """
    # Only the deserialized payload is wanted from the low-level call.
    kwargs['_return_http_data_only'] = True
    # Delegate verbatim; the helper returns either the data or the thread.
    return self.portal_template_folders_id_children_get_with_http_info(
        id, **kwargs)
def portal_template_folders_id_children_get_with_http_info(self, id, **kwargs):
    """Queries children of PortalTemplateFolder.

    Low-level variant of ``portal_template_folders_id_children_get``;
    performs the HTTP call itself and honours the ``callback`` and
    ``_return_http_data_only`` keywords.

    :param str id: PortalTemplateFolder id (required)
    :param str filter: optional query filter (undocumented upstream)
    :param callback function: callback for an asynchronous request (optional)
    :return: list[PortalTemplateFolder], or the request thread when asynchronous
    """
    # Reject keywords this endpoint does not understand.
    allowed = {'id', 'filter', 'callback', '_return_http_data_only'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_id_children_get" % key
            )
    params = dict(kwargs)
    params['id'] = id
    # The path parameter is mandatory.
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_children_get`")

    collection_formats = {}
    resource_path = '/PortalTemplateFolders/{id}/children'.replace('{format}', 'json')
    path_params = {'id': params['id']}
    query_params = {}
    # `filter` is forwarded verbatim as a query parameter when supplied.
    if 'filter' in params:
        query_params['filter'] = params['filter']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # Content negotiation: only emit an Accept header when the client
    # actually selects one.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/xml', 'text/xml',
         'application/javascript', 'text/javascript'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'application/x-www-form-urlencoded',
         'application/xml', 'text/xml'])

    # This endpoint is protected by the access_token scheme.
    auth_settings = ['access_token']
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='list[PortalTemplateFolder]',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_id_children_post(self, id, **kwargs):
    """Creates a new instance in children of this model.

    Synchronous by default; supply a `callback` keyword to make the
    request asynchronous, in which case the request thread is returned
    and the callback receives the response.

    :param str id: PortalTemplateFolder id (required)
    :param PortalTemplateFolder data: attributes of the new child (optional)
    :param callback function: callback for an asynchronous request (optional)
    :return: PortalTemplateFolder, or the request thread when asynchronous
    """
    # Only the deserialized payload is wanted from the low-level call.
    kwargs['_return_http_data_only'] = True
    # Delegate verbatim; the helper returns either the data or the thread.
    return self.portal_template_folders_id_children_post_with_http_info(
        id, **kwargs)
def portal_template_folders_id_children_post_with_http_info(self, id, **kwargs):
    """Creates a new instance in children of this model.

    Low-level variant of ``portal_template_folders_id_children_post``;
    performs the HTTP call itself and honours the ``callback`` and
    ``_return_http_data_only`` keywords.

    :param str id: PortalTemplateFolder id (required)
    :param PortalTemplateFolder data: attributes of the new child (optional)
    :param callback function: callback for an asynchronous request (optional)
    :return: PortalTemplateFolder, or the request thread when asynchronous
    """
    # Reject keywords this endpoint does not understand.
    allowed = {'id', 'data', 'callback', '_return_http_data_only'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_id_children_post" % key
            )
    params = dict(kwargs)
    params['id'] = id
    # The path parameter is mandatory.
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_children_post`")

    collection_formats = {}
    resource_path = '/PortalTemplateFolders/{id}/children'.replace('{format}', 'json')
    path_params = {'id': params['id']}
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    # The optional `data` keyword becomes the request body.
    body_params = params.get('data')

    # Content negotiation: only emit an Accept header when the client
    # actually selects one.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/xml', 'text/xml',
         'application/javascript', 'text/javascript'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'application/x-www-form-urlencoded',
         'application/xml', 'text/xml'])

    # This endpoint is protected by the access_token scheme.
    auth_settings = ['access_token']
    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='PortalTemplateFolder',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_id_delete(self, id, **kwargs):
    """Delete a model instance by {{id}} from the data source.

    Synchronous by default; supply a `callback` keyword to make the
    request asynchronous, in which case the request thread is returned
    and the callback receives the response.

    :param str id: Model id (required)
    :param callback function: callback for an asynchronous request (optional)
    :return: object, or the request thread when asynchronous
    """
    # Only the deserialized payload is wanted from the low-level call.
    kwargs['_return_http_data_only'] = True
    # Delegate verbatim; the helper returns either the data or the thread.
    return self.portal_template_folders_id_delete_with_http_info(id, **kwargs)
def portal_template_folders_id_delete_with_http_info(self, id, **kwargs):
    """Delete a model instance by {{id}} from the data source.

    Low-level variant of ``portal_template_folders_id_delete``;
    performs the HTTP call itself and honours the ``callback`` and
    ``_return_http_data_only`` keywords.

    :param str id: Model id (required)
    :param callback function: callback for an asynchronous request (optional)
    :return: object, or the request thread when asynchronous
    """
    # Reject keywords this endpoint does not understand.
    allowed = {'id', 'callback', '_return_http_data_only'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_id_delete" % key
            )
    params = dict(kwargs)
    params['id'] = id
    # The path parameter is mandatory.
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_delete`")

    collection_formats = {}
    resource_path = '/PortalTemplateFolders/{id}'.replace('{format}', 'json')
    path_params = {'id': params['id']}
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # Content negotiation: only emit an Accept header when the client
    # actually selects one.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/xml', 'text/xml',
         'application/javascript', 'text/javascript'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'application/x-www-form-urlencoded',
         'application/xml', 'text/xml'])

    # This endpoint is protected by the access_token scheme.
    auth_settings = ['access_token']
    return self.api_client.call_api(resource_path, 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='object',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_id_exists_get(self, id, **kwargs):
    """Check whether a model instance exists in the data source.

    Synchronous by default; supply a `callback` keyword to make the
    request asynchronous, in which case the request thread is returned
    and the callback receives the response.

    :param str id: Model id (required)
    :param callback function: callback for an asynchronous request (optional)
    :return: InlineResponse2002, or the request thread when asynchronous
    """
    # Only the deserialized payload is wanted from the low-level call.
    kwargs['_return_http_data_only'] = True
    # Delegate verbatim; the helper returns either the data or the thread.
    return self.portal_template_folders_id_exists_get_with_http_info(id, **kwargs)
def portal_template_folders_id_exists_get_with_http_info(self, id, **kwargs):
    """Check whether a model instance exists in the data source.

    Low-level variant of ``portal_template_folders_id_exists_get``;
    performs the HTTP call itself and honours the ``callback`` and
    ``_return_http_data_only`` keywords.

    :param str id: Model id (required)
    :param callback function: callback for an asynchronous request (optional)
    :return: InlineResponse2002, or the request thread when asynchronous
    """
    # Reject keywords this endpoint does not understand.
    allowed = {'id', 'callback', '_return_http_data_only'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_id_exists_get" % key
            )
    params = dict(kwargs)
    params['id'] = id
    # The path parameter is mandatory.
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_exists_get`")

    collection_formats = {}
    resource_path = '/PortalTemplateFolders/{id}/exists'.replace('{format}', 'json')
    path_params = {'id': params['id']}
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # Content negotiation: only emit an Accept header when the client
    # actually selects one.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/xml', 'text/xml',
         'application/javascript', 'text/javascript'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'application/x-www-form-urlencoded',
         'application/xml', 'text/xml'])

    # This endpoint is protected by the access_token scheme.
    auth_settings = ['access_token']
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='InlineResponse2002',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_id_get(self, id, **kwargs):
    """Find a model instance by {{id}} from the data source.

    Synchronous by default; supply a `callback` keyword to make the
    request asynchronous, in which case the request thread is returned
    and the callback receives the response.

    :param str id: Model id (required)
    :param str filter: Filter defining fields and include - must be a JSON-encoded string ({\"something\":\"value\"})
    :param callback function: callback for an asynchronous request (optional)
    :return: PortalTemplateFolder, or the request thread when asynchronous
    """
    # Only the deserialized payload is wanted from the low-level call.
    kwargs['_return_http_data_only'] = True
    # Delegate verbatim; the helper returns either the data or the thread.
    return self.portal_template_folders_id_get_with_http_info(id, **kwargs)
def portal_template_folders_id_get_with_http_info(self, id, **kwargs):
    """Find a model instance by {{id}} from the data source.

    Low-level variant of ``portal_template_folders_id_get``; performs
    the HTTP call itself and honours the ``callback`` and
    ``_return_http_data_only`` keywords.

    :param str id: Model id (required)
    :param str filter: Filter defining fields and include - must be a JSON-encoded string ({\"something\":\"value\"})
    :param callback function: callback for an asynchronous request (optional)
    :return: PortalTemplateFolder, or the request thread when asynchronous
    """
    # Reject keywords this endpoint does not understand.
    allowed = {'id', 'filter', 'callback', '_return_http_data_only'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_id_get" % key
            )
    params = dict(kwargs)
    params['id'] = id
    # The path parameter is mandatory.
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_get`")

    collection_formats = {}
    resource_path = '/PortalTemplateFolders/{id}'.replace('{format}', 'json')
    path_params = {'id': params['id']}
    query_params = {}
    # `filter` is forwarded verbatim as a query parameter when supplied.
    if 'filter' in params:
        query_params['filter'] = params['filter']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # Content negotiation: only emit an Accept header when the client
    # actually selects one.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/xml', 'text/xml',
         'application/javascript', 'text/javascript'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'application/x-www-form-urlencoded',
         'application/xml', 'text/xml'])

    # This endpoint is protected by the access_token scheme.
    auth_settings = ['access_token']
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='PortalTemplateFolder',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_id_head(self, id, **kwargs):
    """Check whether a model instance exists in the data source.

    Synchronous by default; supply a `callback` keyword to make the
    request asynchronous, in which case the request thread is returned
    and the callback receives the response.

    :param str id: Model id (required)
    :param callback function: callback for an asynchronous request (optional)
    :return: InlineResponse2002, or the request thread when asynchronous
    """
    # Only the deserialized payload is wanted from the low-level call.
    kwargs['_return_http_data_only'] = True
    # Delegate verbatim; the helper returns either the data or the thread.
    return self.portal_template_folders_id_head_with_http_info(id, **kwargs)
def portal_template_folders_id_head_with_http_info(self, id, **kwargs):
    """Check whether a model instance exists in the data source.

    Low-level variant of ``portal_template_folders_id_head``; performs
    the HTTP call itself and honours the ``callback`` and
    ``_return_http_data_only`` keywords.

    :param str id: Model id (required)
    :param callback function: callback for an asynchronous request (optional)
    :return: InlineResponse2002, or the request thread when asynchronous
    """
    # Reject keywords this endpoint does not understand.
    allowed = {'id', 'callback', '_return_http_data_only'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_id_head" % key
            )
    params = dict(kwargs)
    params['id'] = id
    # The path parameter is mandatory.
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_head`")

    collection_formats = {}
    resource_path = '/PortalTemplateFolders/{id}'.replace('{format}', 'json')
    path_params = {'id': params['id']}
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # Content negotiation: only emit an Accept header when the client
    # actually selects one.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/xml', 'text/xml',
         'application/javascript', 'text/javascript'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'application/x-www-form-urlencoded',
         'application/xml', 'text/xml'])

    # This endpoint is protected by the access_token scheme.
    auth_settings = ['access_token']
    return self.api_client.call_api(resource_path, 'HEAD',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='InlineResponse2002',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_id_parent_get(self, id, **kwargs):
    """Fetches belongsTo relation parent.

    Synchronous by default; supply a `callback` keyword to make the
    request asynchronous, in which case the request thread is returned
    and the callback receives the response.

    :param str id: PortalTemplateFolder id (required)
    :param bool refresh: optional refresh flag (undocumented upstream)
    :param callback function: callback for an asynchronous request (optional)
    :return: PortalTemplateFolder, or the request thread when asynchronous
    """
    # Only the deserialized payload is wanted from the low-level call.
    kwargs['_return_http_data_only'] = True
    # Delegate verbatim; the helper returns either the data or the thread.
    return self.portal_template_folders_id_parent_get_with_http_info(id, **kwargs)
def portal_template_folders_id_parent_get_with_http_info(self, id, **kwargs):
    """Fetches belongsTo relation parent.

    Low-level variant of ``portal_template_folders_id_parent_get``;
    performs the HTTP call itself and honours the ``callback`` and
    ``_return_http_data_only`` keywords.

    :param str id: PortalTemplateFolder id (required)
    :param bool refresh: optional refresh flag (undocumented upstream)
    :param callback function: callback for an asynchronous request (optional)
    :return: PortalTemplateFolder, or the request thread when asynchronous
    """
    # Reject keywords this endpoint does not understand.
    allowed = {'id', 'refresh', 'callback', '_return_http_data_only'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_id_parent_get" % key
            )
    params = dict(kwargs)
    params['id'] = id
    # The path parameter is mandatory.
    if params['id'] is None:
        raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_parent_get`")

    collection_formats = {}
    resource_path = '/PortalTemplateFolders/{id}/parent'.replace('{format}', 'json')
    path_params = {'id': params['id']}
    query_params = {}
    # `refresh` is forwarded verbatim as a query parameter when supplied.
    if 'refresh' in params:
        query_params['refresh'] = params['refresh']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # Content negotiation: only emit an Accept header when the client
    # actually selects one.
    accept = self.api_client.select_header_accept(
        ['application/json', 'application/xml', 'text/xml',
         'application/javascript', 'text/javascript'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json', 'application/x-www-form-urlencoded',
         'application/xml', 'text/xml'])

    # This endpoint is protected by the access_token scheme.
    auth_settings = ['access_token']
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='PortalTemplateFolder',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_id_patch(self, id, **kwargs):
    """Patch attributes for a model instance and persist it into the data source.

    Synchronous by default; supply a `callback` keyword to make the
    request asynchronous, in which case the request thread is returned
    and the callback receives the response.

    :param str id: PortalTemplateFolder id (required)
    :param PortalTemplateFolder data: An object of model property name/value pairs
    :param callback function: callback for an asynchronous request (optional)
    :return: PortalTemplateFolder, or the request thread when asynchronous
    """
    # Only the deserialized payload is wanted from the low-level call.
    kwargs['_return_http_data_only'] = True
    # Delegate verbatim; the helper returns either the data or the thread.
    return self.portal_template_folders_id_patch_with_http_info(id, **kwargs)
    def portal_template_folders_id_patch_with_http_info(self, id, **kwargs):
        """
        Patch attributes for a model instance and persist it into the data source.

        Builds and dispatches PATCH /PortalTemplateFolders/{id} through the
        shared API client. This method makes a synchronous HTTP request by
        default. To make an asynchronous HTTP request, please define a
        `callback` function to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.portal_template_folders_id_patch_with_http_info(id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str id: PortalTemplateFolder id (required)
        :param PortalTemplateFolder data: An object of model property name/value pairs
        :return: PortalTemplateFolder
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unexpected keyword argument is supplied
        :raises ValueError: if `id` is missing or None
        """
        all_params = ['id', 'data']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # NOTE: locals() is captured deliberately at this point — `params`
        # holds `self`, `id`, `kwargs` and `all_params`; recognised kwargs are
        # merged in below and `kwargs` itself is removed. Adding new locals
        # above this line would silently add keys to `params`.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method portal_template_folders_id_patch" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_patch`")
        collection_formats = {}
        resource_path = '/PortalTemplateFolders/{id}'.replace('{format}', 'json')
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        # The optional `data` model instance becomes the request body.
        body_params = None
        if 'data' in params:
            body_params = params['data']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
        # Authentication setting
        auth_settings = ['access_token']
        return self.api_client.call_api(resource_path, 'PATCH',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='PortalTemplateFolder',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        collection_formats=collection_formats)
def portal_template_folders_id_portal_get(self, id, **kwargs):
"""
Fetches belongsTo relation portal.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portal_template_folders_id_portal_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: PortalTemplateFolder id (required)
:param bool refresh:
:return: Portal
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portal_template_folders_id_portal_get_with_http_info(id, **kwargs)
else:
(data) = self.portal_template_folders_id_portal_get_with_http_info(id, **kwargs)
return data
    def portal_template_folders_id_portal_get_with_http_info(self, id, **kwargs):
        """
        Fetches belongsTo relation portal.

        Builds and dispatches GET /PortalTemplateFolders/{id}/portal through
        the shared API client. This method makes a synchronous HTTP request by
        default. To make an asynchronous HTTP request, please define a
        `callback` function to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.portal_template_folders_id_portal_get_with_http_info(id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str id: PortalTemplateFolder id (required)
        :param bool refresh:
        :return: Portal
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unexpected keyword argument is supplied
        :raises ValueError: if `id` is missing or None
        """
        all_params = ['id', 'refresh']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # NOTE: locals() is captured deliberately at this point — `params`
        # holds `self`, `id`, `kwargs` and `all_params`; recognised kwargs are
        # merged in below and `kwargs` itself is removed. Adding new locals
        # above this line would silently add keys to `params`.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method portal_template_folders_id_portal_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_portal_get`")
        collection_formats = {}
        resource_path = '/PortalTemplateFolders/{id}/portal'.replace('{format}', 'json')
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']
        # The optional `refresh` flag is passed as a query-string parameter.
        query_params = {}
        if 'refresh' in params:
            query_params['refresh'] = params['refresh']
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
        # Authentication setting
        auth_settings = ['access_token']
        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Portal',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        collection_formats=collection_formats)
def portal_template_folders_id_put(self, id, **kwargs):
"""
Replace attributes for a model instance and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portal_template_folders_id_put(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:param PortalTemplateFolder data: Model instance data
:return: PortalTemplateFolder
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portal_template_folders_id_put_with_http_info(id, **kwargs)
else:
(data) = self.portal_template_folders_id_put_with_http_info(id, **kwargs)
return data
    def portal_template_folders_id_put_with_http_info(self, id, **kwargs):
        """
        Replace attributes for a model instance and persist it into the data source.

        Builds and dispatches PUT /PortalTemplateFolders/{id} through the
        shared API client. This method makes a synchronous HTTP request by
        default. To make an asynchronous HTTP request, please define a
        `callback` function to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.portal_template_folders_id_put_with_http_info(id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str id: Model id (required)
        :param PortalTemplateFolder data: Model instance data
        :return: PortalTemplateFolder
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unexpected keyword argument is supplied
        :raises ValueError: if `id` is missing or None
        """
        all_params = ['id', 'data']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # NOTE: locals() is captured deliberately at this point — `params`
        # holds `self`, `id`, `kwargs` and `all_params`; recognised kwargs are
        # merged in below and `kwargs` itself is removed. Adding new locals
        # above this line would silently add keys to `params`.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method portal_template_folders_id_put" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_put`")
        collection_formats = {}
        resource_path = '/PortalTemplateFolders/{id}'.replace('{format}', 'json')
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        # The optional `data` model instance becomes the request body.
        body_params = None
        if 'data' in params:
            body_params = params['data']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
        # Authentication setting
        auth_settings = ['access_token']
        return self.api_client.call_api(resource_path, 'PUT',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='PortalTemplateFolder',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        collection_formats=collection_formats)
def portal_template_folders_id_replace_post(self, id, **kwargs):
"""
Replace attributes for a model instance and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portal_template_folders_id_replace_post(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:param PortalTemplateFolder data: Model instance data
:return: PortalTemplateFolder
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portal_template_folders_id_replace_post_with_http_info(id, **kwargs)
else:
(data) = self.portal_template_folders_id_replace_post_with_http_info(id, **kwargs)
return data
    def portal_template_folders_id_replace_post_with_http_info(self, id, **kwargs):
        """
        Replace attributes for a model instance and persist it into the data source.

        Builds and dispatches POST /PortalTemplateFolders/{id}/replace through
        the shared API client. This method makes a synchronous HTTP request by
        default. To make an asynchronous HTTP request, please define a
        `callback` function to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.portal_template_folders_id_replace_post_with_http_info(id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str id: Model id (required)
        :param PortalTemplateFolder data: Model instance data
        :return: PortalTemplateFolder
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unexpected keyword argument is supplied
        :raises ValueError: if `id` is missing or None
        """
        all_params = ['id', 'data']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # NOTE: locals() is captured deliberately at this point — `params`
        # holds `self`, `id`, `kwargs` and `all_params`; recognised kwargs are
        # merged in below and `kwargs` itself is removed. Adding new locals
        # above this line would silently add keys to `params`.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method portal_template_folders_id_replace_post" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_replace_post`")
        collection_formats = {}
        resource_path = '/PortalTemplateFolders/{id}/replace'.replace('{format}', 'json')
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        # The optional `data` model instance becomes the request body.
        body_params = None
        if 'data' in params:
            body_params = params['data']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
        # Authentication setting
        auth_settings = ['access_token']
        return self.api_client.call_api(resource_path, 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='PortalTemplateFolder',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        collection_formats=collection_formats)
def portal_template_folders_id_templates_count_get(self, id, **kwargs):
"""
Counts templates of PortalTemplateFolder.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portal_template_folders_id_templates_count_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: PortalTemplateFolder id (required)
:param str where: Criteria to match model instances
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portal_template_folders_id_templates_count_get_with_http_info(id, **kwargs)
else:
(data) = self.portal_template_folders_id_templates_count_get_with_http_info(id, **kwargs)
return data
    def portal_template_folders_id_templates_count_get_with_http_info(self, id, **kwargs):
        """
        Counts templates of PortalTemplateFolder.

        Builds and dispatches GET /PortalTemplateFolders/{id}/templates/count
        through the shared API client. This method makes a synchronous HTTP
        request by default. To make an asynchronous HTTP request, please
        define a `callback` function to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.portal_template_folders_id_templates_count_get_with_http_info(id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str id: PortalTemplateFolder id (required)
        :param str where: Criteria to match model instances
        :return: InlineResponse2001
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unexpected keyword argument is supplied
        :raises ValueError: if `id` is missing or None
        """
        all_params = ['id', 'where']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # NOTE: locals() is captured deliberately at this point — `params`
        # holds `self`, `id`, `kwargs` and `all_params`; recognised kwargs are
        # merged in below and `kwargs` itself is removed. Adding new locals
        # above this line would silently add keys to `params`.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method portal_template_folders_id_templates_count_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_templates_count_get`")
        collection_formats = {}
        resource_path = '/PortalTemplateFolders/{id}/templates/count'.replace('{format}', 'json')
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']
        # The optional `where` filter is passed as a query-string parameter.
        query_params = {}
        if 'where' in params:
            query_params['where'] = params['where']
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
        # Authentication setting
        auth_settings = ['access_token']
        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='InlineResponse2001',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        collection_formats=collection_formats)
def portal_template_folders_id_templates_fk_delete(self, id, fk, **kwargs):
"""
Delete a related item by id for templates.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portal_template_folders_id_templates_fk_delete(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: PortalTemplateFolder id (required)
:param str fk: Foreign key for templates (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portal_template_folders_id_templates_fk_delete_with_http_info(id, fk, **kwargs)
else:
(data) = self.portal_template_folders_id_templates_fk_delete_with_http_info(id, fk, **kwargs)
return data
    def portal_template_folders_id_templates_fk_delete_with_http_info(self, id, fk, **kwargs):
        """
        Delete a related item by id for templates.

        Builds and dispatches DELETE /PortalTemplateFolders/{id}/templates/{fk}
        through the shared API client. This method makes a synchronous HTTP
        request by default. To make an asynchronous HTTP request, please
        define a `callback` function to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.portal_template_folders_id_templates_fk_delete_with_http_info(id, fk, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str id: PortalTemplateFolder id (required)
        :param str fk: Foreign key for templates (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unexpected keyword argument is supplied
        :raises ValueError: if `id` or `fk` is missing or None
        """
        all_params = ['id', 'fk']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # NOTE: locals() is captured deliberately at this point — `params`
        # holds `self`, `id`, `fk`, `kwargs` and `all_params`; recognised
        # kwargs are merged in below and `kwargs` itself is removed. Adding
        # new locals above this line would silently add keys to `params`.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method portal_template_folders_id_templates_fk_delete" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_templates_fk_delete`")
        # verify the required parameter 'fk' is set
        if ('fk' not in params) or (params['fk'] is None):
            raise ValueError("Missing the required parameter `fk` when calling `portal_template_folders_id_templates_fk_delete`")
        collection_formats = {}
        resource_path = '/PortalTemplateFolders/{id}/templates/{fk}'.replace('{format}', 'json')
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']
        if 'fk' in params:
            path_params['fk'] = params['fk']
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
        # Authentication setting
        auth_settings = ['access_token']
        return self.api_client.call_api(resource_path, 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        collection_formats=collection_formats)
def portal_template_folders_id_templates_fk_get(self, id, fk, **kwargs):
"""
Find a related item by id for templates.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portal_template_folders_id_templates_fk_get(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: PortalTemplateFolder id (required)
:param str fk: Foreign key for templates (required)
:return: Template
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portal_template_folders_id_templates_fk_get_with_http_info(id, fk, **kwargs)
else:
(data) = self.portal_template_folders_id_templates_fk_get_with_http_info(id, fk, **kwargs)
return data
    def portal_template_folders_id_templates_fk_get_with_http_info(self, id, fk, **kwargs):
        """
        Find a related item by id for templates.

        Builds and dispatches GET /PortalTemplateFolders/{id}/templates/{fk}
        through the shared API client. This method makes a synchronous HTTP
        request by default. To make an asynchronous HTTP request, please
        define a `callback` function to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.portal_template_folders_id_templates_fk_get_with_http_info(id, fk, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str id: PortalTemplateFolder id (required)
        :param str fk: Foreign key for templates (required)
        :return: Template
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unexpected keyword argument is supplied
        :raises ValueError: if `id` or `fk` is missing or None
        """
        all_params = ['id', 'fk']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # NOTE: locals() is captured deliberately at this point — `params`
        # holds `self`, `id`, `fk`, `kwargs` and `all_params`; recognised
        # kwargs are merged in below and `kwargs` itself is removed. Adding
        # new locals above this line would silently add keys to `params`.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method portal_template_folders_id_templates_fk_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_templates_fk_get`")
        # verify the required parameter 'fk' is set
        if ('fk' not in params) or (params['fk'] is None):
            raise ValueError("Missing the required parameter `fk` when calling `portal_template_folders_id_templates_fk_get`")
        collection_formats = {}
        resource_path = '/PortalTemplateFolders/{id}/templates/{fk}'.replace('{format}', 'json')
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']
        if 'fk' in params:
            path_params['fk'] = params['fk']
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
        # Authentication setting
        auth_settings = ['access_token']
        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Template',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        collection_formats=collection_formats)
def portal_template_folders_id_templates_fk_put(self, id, fk, **kwargs):
"""
Update a related item by id for templates.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portal_template_folders_id_templates_fk_put(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: PortalTemplateFolder id (required)
:param str fk: Foreign key for templates (required)
:param Template data:
:return: Template
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portal_template_folders_id_templates_fk_put_with_http_info(id, fk, **kwargs)
else:
(data) = self.portal_template_folders_id_templates_fk_put_with_http_info(id, fk, **kwargs)
return data
    def portal_template_folders_id_templates_fk_put_with_http_info(self, id, fk, **kwargs):
        """
        Update a related item by id for templates.

        Builds and dispatches PUT /PortalTemplateFolders/{id}/templates/{fk}
        through the shared API client. This method makes a synchronous HTTP
        request by default. To make an asynchronous HTTP request, please
        define a `callback` function to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.portal_template_folders_id_templates_fk_put_with_http_info(id, fk, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str id: PortalTemplateFolder id (required)
        :param str fk: Foreign key for templates (required)
        :param Template data:
        :return: Template
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unexpected keyword argument is supplied
        :raises ValueError: if `id` or `fk` is missing or None
        """
        all_params = ['id', 'fk', 'data']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # NOTE: locals() is captured deliberately at this point — `params`
        # holds `self`, `id`, `fk`, `kwargs` and `all_params`; recognised
        # kwargs are merged in below and `kwargs` itself is removed. Adding
        # new locals above this line would silently add keys to `params`.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method portal_template_folders_id_templates_fk_put" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_templates_fk_put`")
        # verify the required parameter 'fk' is set
        if ('fk' not in params) or (params['fk'] is None):
            raise ValueError("Missing the required parameter `fk` when calling `portal_template_folders_id_templates_fk_put`")
        collection_formats = {}
        resource_path = '/PortalTemplateFolders/{id}/templates/{fk}'.replace('{format}', 'json')
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']
        if 'fk' in params:
            path_params['fk'] = params['fk']
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        # The optional `data` model instance becomes the request body.
        body_params = None
        if 'data' in params:
            body_params = params['data']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
        # Authentication setting
        auth_settings = ['access_token']
        return self.api_client.call_api(resource_path, 'PUT',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Template',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        collection_formats=collection_formats)
def portal_template_folders_id_templates_get(self, id, **kwargs):
"""
Queries templates of PortalTemplateFolder.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portal_template_folders_id_templates_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: PortalTemplateFolder id (required)
:param str filter:
:return: list[Template]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portal_template_folders_id_templates_get_with_http_info(id, **kwargs)
else:
(data) = self.portal_template_folders_id_templates_get_with_http_info(id, **kwargs)
return data
    def portal_template_folders_id_templates_get_with_http_info(self, id, **kwargs):
        """
        Queries templates of PortalTemplateFolder.

        Builds and dispatches GET /PortalTemplateFolders/{id}/templates
        through the shared API client. This method makes a synchronous HTTP
        request by default. To make an asynchronous HTTP request, please
        define a `callback` function to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.portal_template_folders_id_templates_get_with_http_info(id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str id: PortalTemplateFolder id (required)
        :param str filter:
        :return: list[Template]
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unexpected keyword argument is supplied
        :raises ValueError: if `id` is missing or None
        """
        all_params = ['id', 'filter']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # NOTE: locals() is captured deliberately at this point — `params`
        # holds `self`, `id`, `kwargs` and `all_params`; recognised kwargs are
        # merged in below and `kwargs` itself is removed. Adding new locals
        # above this line would silently add keys to `params`.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method portal_template_folders_id_templates_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_templates_get`")
        collection_formats = {}
        resource_path = '/PortalTemplateFolders/{id}/templates'.replace('{format}', 'json')
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']
        # The optional `filter` criteria are passed as a query-string parameter.
        query_params = {}
        if 'filter' in params:
            query_params['filter'] = params['filter']
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
        # Authentication setting
        auth_settings = ['access_token']
        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='list[Template]',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        collection_formats=collection_formats)
def portal_template_folders_id_templates_post(self, id, **kwargs):
"""
Creates a new instance in templates of this model.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portal_template_folders_id_templates_post(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: PortalTemplateFolder id (required)
:param Template data:
:return: Template
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portal_template_folders_id_templates_post_with_http_info(id, **kwargs)
else:
(data) = self.portal_template_folders_id_templates_post_with_http_info(id, **kwargs)
return data
def portal_template_folders_id_templates_post_with_http_info(self, id, **kwargs):
    """
    Creates a new instance in templates of this model.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.portal_template_folders_id_templates_post_with_http_info(id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: PortalTemplateFolder id (required)
    :param Template data:
    :return: Template
        If the method is called asynchronously,
        returns the request thread.
    """
    # Arguments accepted by this endpoint; anything else in **kwargs is a
    # caller error and is rejected below.
    all_params = ['id', 'data']
    all_params.append('callback')
    all_params.append('_return_http_data_only')

    # Snapshot the local namespace (self, id, kwargs, all_params), then fold
    # the validated kwargs into it so every argument can be looked up
    # uniformly through `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):  # iteritems: module-level import, not visible in this chunk
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_id_templates_post" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_templates_post`")

    collection_formats = {}

    # `{id}` is substituted from path_params by the API client.
    resource_path = '/PortalTemplateFolders/{id}/templates'.replace('{format}', 'json')
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    # The optional `data` argument is serialized as the request body.
    body_params = None
    if 'data' in params:
        body_params = params['data']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])

    # Authentication setting
    auth_settings = ['access_token']

    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='Template',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_id_templates_rel_fk_delete(self, id, fk, **kwargs):
    """
    Remove the templates relation to an item by id.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.portal_template_folders_id_templates_rel_fk_delete(id, fk, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: PortalTemplateFolder id (required)
    :param str fk: Foreign key for templates (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the low-level call for just the response data, not the full
    # (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # The generated code branched on `callback`, but both branches made the
    # identical delegated call and returned its result, so the conditional
    # was redundant. With a callback the delegate returns the request
    # thread; without one it returns the deserialized data (None here).
    return self.portal_template_folders_id_templates_rel_fk_delete_with_http_info(id, fk, **kwargs)
def portal_template_folders_id_templates_rel_fk_delete_with_http_info(self, id, fk, **kwargs):
    """
    Remove the templates relation to an item by id.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.portal_template_folders_id_templates_rel_fk_delete_with_http_info(id, fk, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: PortalTemplateFolder id (required)
    :param str fk: Foreign key for templates (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Arguments accepted by this endpoint; anything else in **kwargs is a
    # caller error and is rejected below.
    all_params = ['id', 'fk']
    all_params.append('callback')
    all_params.append('_return_http_data_only')

    # Snapshot the local namespace, then fold the validated kwargs into it
    # so every argument can be looked up uniformly through `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):  # iteritems: module-level import, not visible in this chunk
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_id_templates_rel_fk_delete" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_templates_rel_fk_delete`")
    # verify the required parameter 'fk' is set
    if ('fk' not in params) or (params['fk'] is None):
        raise ValueError("Missing the required parameter `fk` when calling `portal_template_folders_id_templates_rel_fk_delete`")

    collection_formats = {}

    # `{id}` and `{fk}` are substituted from path_params by the API client.
    resource_path = '/PortalTemplateFolders/{id}/templates/rel/{fk}'.replace('{format}', 'json')
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    if 'fk' in params:
        path_params['fk'] = params['fk']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    # DELETE carries no request body.
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])

    # Authentication setting
    auth_settings = ['access_token']

    return self.api_client.call_api(resource_path, 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type=None,
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_id_templates_rel_fk_head(self, id, fk, **kwargs):
    """
    Check the existence of templates relation to an item by id.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.portal_template_folders_id_templates_rel_fk_head(id, fk, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: PortalTemplateFolder id (required)
    :param str fk: Foreign key for templates (required)
    :return: bool
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the low-level call for just the response data, not the full
    # (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # The generated code branched on `callback`, but both branches made the
    # identical delegated call and returned its result, so the conditional
    # was redundant. With a callback the delegate returns the request
    # thread; without one it returns the deserialized bool.
    return self.portal_template_folders_id_templates_rel_fk_head_with_http_info(id, fk, **kwargs)
def portal_template_folders_id_templates_rel_fk_head_with_http_info(self, id, fk, **kwargs):
    """
    Check the existence of templates relation to an item by id.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.portal_template_folders_id_templates_rel_fk_head_with_http_info(id, fk, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: PortalTemplateFolder id (required)
    :param str fk: Foreign key for templates (required)
    :return: bool
        If the method is called asynchronously,
        returns the request thread.
    """
    # Arguments accepted by this endpoint; anything else in **kwargs is a
    # caller error and is rejected below.
    all_params = ['id', 'fk']
    all_params.append('callback')
    all_params.append('_return_http_data_only')

    # Snapshot the local namespace, then fold the validated kwargs into it
    # so every argument can be looked up uniformly through `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):  # iteritems: module-level import, not visible in this chunk
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_id_templates_rel_fk_head" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_templates_rel_fk_head`")
    # verify the required parameter 'fk' is set
    if ('fk' not in params) or (params['fk'] is None):
        raise ValueError("Missing the required parameter `fk` when calling `portal_template_folders_id_templates_rel_fk_head`")

    collection_formats = {}

    # `{id}` and `{fk}` are substituted from path_params by the API client.
    resource_path = '/PortalTemplateFolders/{id}/templates/rel/{fk}'.replace('{format}', 'json')
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    if 'fk' in params:
        path_params['fk'] = params['fk']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    # HEAD carries no request body.
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])

    # Authentication setting
    auth_settings = ['access_token']

    return self.api_client.call_api(resource_path, 'HEAD',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='bool',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_id_templates_rel_fk_put(self, id, fk, **kwargs):
    """
    Add a related item by id for templates.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.portal_template_folders_id_templates_rel_fk_put(id, fk, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: PortalTemplateFolder id (required)
    :param str fk: Foreign key for templates (required)
    :param PortalTemplate data:
    :return: PortalTemplate
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the low-level call for just the response data, not the full
    # (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # The generated code branched on `callback`, but both branches made the
    # identical delegated call and returned its result, so the conditional
    # was redundant. With a callback the delegate returns the request
    # thread; without one it returns the deserialized data.
    return self.portal_template_folders_id_templates_rel_fk_put_with_http_info(id, fk, **kwargs)
def portal_template_folders_id_templates_rel_fk_put_with_http_info(self, id, fk, **kwargs):
    """
    Add a related item by id for templates.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.portal_template_folders_id_templates_rel_fk_put_with_http_info(id, fk, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: PortalTemplateFolder id (required)
    :param str fk: Foreign key for templates (required)
    :param PortalTemplate data:
    :return: PortalTemplate
        If the method is called asynchronously,
        returns the request thread.
    """
    # Arguments accepted by this endpoint; anything else in **kwargs is a
    # caller error and is rejected below.
    all_params = ['id', 'fk', 'data']
    all_params.append('callback')
    all_params.append('_return_http_data_only')

    # Snapshot the local namespace, then fold the validated kwargs into it
    # so every argument can be looked up uniformly through `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):  # iteritems: module-level import, not visible in this chunk
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_id_templates_rel_fk_put" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `portal_template_folders_id_templates_rel_fk_put`")
    # verify the required parameter 'fk' is set
    if ('fk' not in params) or (params['fk'] is None):
        raise ValueError("Missing the required parameter `fk` when calling `portal_template_folders_id_templates_rel_fk_put`")

    collection_formats = {}

    # `{id}` and `{fk}` are substituted from path_params by the API client.
    resource_path = '/PortalTemplateFolders/{id}/templates/rel/{fk}'.replace('{format}', 'json')
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']
    if 'fk' in params:
        path_params['fk'] = params['fk']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    # The optional `data` argument is serialized as the request body.
    body_params = None
    if 'data' in params:
        body_params = params['data']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])

    # Authentication setting
    auth_settings = ['access_token']

    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='PortalTemplate',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def portal_template_folders_post(self, **kwargs):
    """
    Create a new instance of the model and persist it into the data source.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.portal_template_folders_post(callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param PortalTemplateFolder data: Model instance data
    :return: PortalTemplateFolder
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the low-level call for just the response data, not the full
    # (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # The generated code branched on `callback`, but both branches made the
    # identical delegated call and returned its result, so the conditional
    # was redundant. With a callback the delegate returns the request
    # thread; without one it returns the deserialized data.
    return self.portal_template_folders_post_with_http_info(**kwargs)
def portal_template_folders_post_with_http_info(self, **kwargs):
    """
    Create a new instance of the model and persist it into the data source.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.portal_template_folders_post_with_http_info(callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param PortalTemplateFolder data: Model instance data
    :return: PortalTemplateFolder
        If the method is called asynchronously,
        returns the request thread.
    """
    # Arguments accepted by this endpoint; anything else in **kwargs is a
    # caller error and is rejected below.
    all_params = ['data']
    all_params.append('callback')
    all_params.append('_return_http_data_only')

    # Snapshot the local namespace, then fold the validated kwargs into it
    # so every argument can be looked up uniformly through `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):  # iteritems: module-level import, not visible in this chunk
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method portal_template_folders_post" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    # Collection-level endpoint; no path parameters are required.
    resource_path = '/PortalTemplateFolders'.replace('{format}', 'json')
    path_params = {}

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    # The optional `data` argument is serialized as the request body.
    body_params = None
    if 'data' in params:
        body_params = params['data']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])

    # Authentication setting
    auth_settings = ['access_token']

    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='PortalTemplateFolder',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
| 42.376692
| 165
| 0.57519
| 14,767
| 143,996
| 5.367983
| 0.018555
| 0.060553
| 0.064376
| 0.05832
| 0.98548
| 0.98466
| 0.983512
| 0.97781
| 0.974265
| 0.972486
| 0
| 0.000749
| 0.341433
| 143,996
| 3,397
| 166
| 42.389167
| 0.835149
| 0.313925
| 0
| 0.84625
| 0
| 0
| 0.1913
| 0.081612
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038125
| false
| 0
| 0.004375
| 0
| 0.099375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
98a112da839730cd697c613ef880b86ce8a59b9c
| 198
|
py
|
Python
|
test/automation/dataset/__init__.py
|
agupta54/ulca
|
c1f570ac254ce2ac73f40c49716458f4f7cbaee2
|
[
"MIT"
] | 3
|
2022-01-12T06:51:51.000Z
|
2022-02-23T18:54:33.000Z
|
test/automation/dataset/__init__.py
|
agupta54/ulca
|
c1f570ac254ce2ac73f40c49716458f4f7cbaee2
|
[
"MIT"
] | 6
|
2021-08-31T19:21:26.000Z
|
2022-01-03T05:53:42.000Z
|
test/automation/dataset/__init__.py
|
agupta54/ulca
|
c1f570ac254ce2ac73f40c49716458f4f7cbaee2
|
[
"MIT"
] | 8
|
2021-08-12T08:07:49.000Z
|
2022-01-25T04:40:51.000Z
|
from . import loader
from . import dataset_all
from . import dataset_csv
from . import dataset_submit
from . import dataset_search
from . import dataset_download
from . import dataset_submit_status
| 24.75
| 35
| 0.823232
| 28
| 198
| 5.571429
| 0.357143
| 0.448718
| 0.653846
| 0.294872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141414
| 198
| 7
| 36
| 28.285714
| 0.917647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
7f37d4b60c9ec37350e9f86849ee7281177da1c4
| 135
|
py
|
Python
|
tests/test_version.py
|
jed-frey/Ping
|
9297ca00fbbcf0cf3563ca9186e852a552ac79c9
|
[
"BSD-3-Clause"
] | 1
|
2018-08-06T04:42:14.000Z
|
2018-08-06T04:42:14.000Z
|
tests/test_version.py
|
jed-frey/WinPythonToxJenkins
|
9297ca00fbbcf0cf3563ca9186e852a552ac79c9
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_version.py
|
jed-frey/WinPythonToxJenkins
|
9297ca00fbbcf0cf3563ca9186e852a552ac79c9
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
def test_version1():
assert sys.version_info >= (3,0)
def test_version2():
assert sys.version_info <= (3,0)
| 19.285714
| 37
| 0.644444
| 20
| 135
| 4.15
| 0.55
| 0.168675
| 0.385542
| 0.481928
| 0.53012
| 0.53012
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 0.222222
| 135
| 7
| 38
| 19.285714
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0.4
| true
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
7f743b8f41e0c7f8895dff2b520bf3bec16a9ed5
| 2,268
|
py
|
Python
|
fastweb/test/test_pool.py
|
BSlience/fastweb
|
2c1b956e9846c4205d0201d39d09891d088754e4
|
[
"Apache-2.0"
] | 123
|
2017-06-06T04:59:07.000Z
|
2019-07-11T10:20:35.000Z
|
fastweb/test/test_pool.py
|
BSlience/fastweb
|
2c1b956e9846c4205d0201d39d09891d088754e4
|
[
"Apache-2.0"
] | null | null | null |
fastweb/test/test_pool.py
|
BSlience/fastweb
|
2c1b956e9846c4205d0201d39d09891d088754e4
|
[
"Apache-2.0"
] | 2
|
2017-06-28T05:58:39.000Z
|
2018-09-25T00:18:33.000Z
|
# coding:utf8
from fastweb.accesspoint import ioloop
from fastweb.component.db.mysql import SyncMysql, AsynMysql
from fastweb.pool import SyncConnectionPool, AsynConnectionPool
setting = {'host': 'localhost', 'port': 3306, 'user': 'root', 'password': ''}
class TestSyncPool(object):
def test_create(self):
pool = SyncConnectionPool(SyncMysql, setting, name='test sync mysql pool', size=5, awake=10)
pool.create()
def test_add_connection(self):
pool = SyncConnectionPool(SyncMysql, setting, name='test sync mysql pool', size=5, awake=10)
pool.create()
pool.add_connection()
def test_get_connection(self):
pool = SyncConnectionPool(SyncMysql, setting, name='test sync mysql pool', size=5, awake=10)
pool.create()
assert isinstance(pool.get_connection(), SyncMysql)
def test_rescue(self):
pool = SyncConnectionPool(SyncMysql, setting, name='test sync mysql pool', size=5, awake=10)
pool.create()
pool.rescue()
def test_concurrency_get_connection(self):
"""大并发下的获取连接"""
pass
def test_maxconn_rescue(self):
"""动态扩展到最大连接数"""
pass
class TestAsynPool(object):
def test_create(self):
pool = AsynConnectionPool(AsynMysql, setting, name='test asyn pool', size=5, awake=10)
ioloop.IOLoop.current().run_sync(pool.create)
def test_add_connection(self):
pool = AsynConnectionPool(AsynMysql, setting, name='test asyn pool', size=5, awake=10)
ioloop.IOLoop.current().run_sync(pool.create)
ioloop.IOLoop.current().run_sync(pool.add_connection)
def test_get_connection(self):
pool = AsynConnectionPool(AsynMysql, setting, name='test asyn pool', size=5, awake=10)
ioloop.IOLoop.current().run_sync(pool.create)
assert isinstance(pool.get_connection(), AsynMysql)
def test_rescue(self):
pool = AsynConnectionPool(AsynMysql, setting, name='test asyn pool', size=5, awake=10)
ioloop.IOLoop.current().run_sync(pool.create)
ioloop.IOLoop.current().run_sync(pool.rescue)
def test_concurrency_get_connection(self):
"""大并发下的获取连接"""
pass
def test_maxconn_rescue(self):
"""动态扩展到最大连接数"""
pass
| 32.869565
| 100
| 0.675485
| 269
| 2,268
| 5.576208
| 0.189591
| 0.056
| 0.08
| 0.074667
| 0.834667
| 0.817333
| 0.792
| 0.748
| 0.734667
| 0.704
| 0
| 0.016066
| 0.204145
| 2,268
| 68
| 101
| 33.352941
| 0.814958
| 0.02381
| 0
| 0.727273
| 0
| 0
| 0.077134
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 1
| 0.272727
| false
| 0.113636
| 0.068182
| 0
| 0.386364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
7fb376e35adad761facc7a7091aacb89f01d5da7
| 11,125
|
py
|
Python
|
test/test_collector.py
|
douardda/tidypy
|
9d4c6470af8e0ca85209333a99787290f36498d4
|
[
"MIT"
] | 33
|
2018-03-13T21:30:51.000Z
|
2022-01-17T02:14:59.000Z
|
test/test_collector.py
|
douardda/tidypy
|
9d4c6470af8e0ca85209333a99787290f36498d4
|
[
"MIT"
] | 89
|
2017-10-14T16:39:20.000Z
|
2022-03-06T17:23:36.000Z
|
test/test_collector.py
|
douardda/tidypy
|
9d4c6470af8e0ca85209333a99787290f36498d4
|
[
"MIT"
] | 9
|
2018-05-17T16:11:12.000Z
|
2021-09-15T01:48:16.000Z
|
import random
from tidypy import Collector, Issue, TidyPyIssue, get_default_config
class FooIssue(Issue):
tool = 'foo'
class BarIssue(Issue):
tool = 'bar'
def test_basics():
collector = Collector(get_default_config())
assert collector.get_issues() == []
assert collector.issue_count() == 0
assert collector.issue_count(include_unclean=True) == 0
collector.add_issues(
FooIssue('test', 'test message', 'test/file.ext', 1),
)
collector.add_issues([
FooIssue('test2', 'test message 2', 'test/file.ext', 2),
BarIssue('test3', 'test message 3', 'test/file.ext', 3),
])
assert collector.issue_count() == 3
assert collector.issue_count(include_unclean=True) == 3
def test_disabled():
cfg = get_default_config()
cfg['disabled'] = ['foo']
collector = Collector(cfg)
collector.add_issues([
TidyPyIssue('code1', 'message 1', 'test1.py', 2),
TidyPyIssue('foo', 'message 2', 'test2.py', 3),
])
assert collector.issue_count() == 1
assert collector.issue_count(include_unclean=True) == 2
assert collector.get_issues()[0].code == 'code1'
def test_sort_issues_default():
collector = Collector(get_default_config())
expected = [
BarIssue('test', 'test message', 'test/file.ext', 2),
BarIssue('test2', 'test message', 'test/file.ext', 2),
FooIssue('test', 'test message', 'test/file.ext', 2),
FooIssue('test2', 'test message', 'test/file.ext', 2),
BarIssue('test', 'test message', 'test/file.ext', 2, 5),
BarIssue('test2', 'test message', 'test/file.ext', 2, 5),
FooIssue('test', 'test message', 'test/file.ext', 2, 5),
FooIssue('test2', 'test message', 'test/file.ext', 2, 5),
BarIssue('test', 'test message', 'test/file.ext', 5, 3),
BarIssue('test2', 'test message', 'test/file.ext', 5, 3),
FooIssue('test', 'test message', 'test/file.ext', 5, 3),
FooIssue('test2', 'test message', 'test/file.ext', 5, 3),
BarIssue('test', 'test message', 'test/file2.ext', 2),
BarIssue('test2', 'test message', 'test/file2.ext', 2),
FooIssue('test', 'test message', 'test/file2.ext', 2),
FooIssue('test2', 'test message', 'test/file2.ext', 2),
BarIssue('test', 'test message', 'test/file2.ext', 2, 5),
BarIssue('test2', 'test message', 'test/file2.ext', 2, 5),
FooIssue('test', 'test message', 'test/file2.ext', 2, 5),
FooIssue('test2', 'test message', 'test/file2.ext', 2, 5),
BarIssue('test', 'test message', 'test/file2.ext', 5, 3),
BarIssue('test2', 'test message', 'test/file2.ext', 5, 3),
FooIssue('test', 'test message', 'test/file2.ext', 5, 3),
FooIssue('test2', 'test message', 'test/file2.ext', 5, 3),
]
shuffled = [] + expected
random.shuffle(shuffled)
assert collector._sort_issues(shuffled) == expected
def test_sort_issues_custom():
collector = Collector(get_default_config())
expected = [
BarIssue('test', 'test message', 'test/file.ext', 2),
BarIssue('test2', 'test message', 'test/file.ext', 2),
BarIssue('test2', 'test message', 'test/file.ext', 5),
FooIssue('test', 'test message', 'test/file.ext', 2),
FooIssue('test2', 'test message', 'test/file.ext', 2),
FooIssue('test2', 'test message', 'test/file.ext', 5),
]
shuffled = [] + expected
random.shuffle(shuffled)
assert collector._sort_issues(shuffled, ('tool', 'code', 'line')) == expected
def test_sort_issues_empty():
collector = Collector(get_default_config())
expected = [
BarIssue('test', 'test message', 'test/file.ext', 2),
BarIssue('test2', 'test message', 'test/file.ext', 2),
BarIssue('test2', 'test message', 'test/file.ext', 5),
FooIssue('test', 'test message', 'test/file.ext', 2),
FooIssue('test2', 'test message', 'test/file.ext', 2),
FooIssue('test2', 'test message', 'test/file.ext', 5),
]
assert collector._sort_issues(expected, collector.NO_SORT) == expected
def test_get_grouped_issues_default():
cfg = get_default_config()
cfg['merge-issues'] = False
collector = Collector(cfg)
expected = {
'test/file.ext': [
BarIssue('test', 'test message', 'test/file.ext', 2),
BarIssue('test2', 'test message', 'test/file.ext', 2),
FooIssue('test', 'test message', 'test/file.ext', 2),
FooIssue('test2', 'test message', 'test/file.ext', 2),
BarIssue('test', 'test message', 'test/file.ext', 2, 5),
BarIssue('test2', 'test message', 'test/file.ext', 2, 5),
FooIssue('test', 'test message', 'test/file.ext', 2, 5),
FooIssue('test2', 'test message', 'test/file.ext', 2, 5),
BarIssue('test', 'test message', 'test/file.ext', 5, 3),
BarIssue('test2', 'test message', 'test/file.ext', 5, 3),
FooIssue('test', 'test message', 'test/file.ext', 5, 3),
FooIssue('test2', 'test message', 'test/file.ext', 5, 3),
],
'test/file2.ext': [
BarIssue('test', 'test message', 'test/file2.ext', 2),
BarIssue('test2', 'test message', 'test/file2.ext', 2),
FooIssue('test', 'test message', 'test/file2.ext', 2),
FooIssue('test2', 'test message', 'test/file2.ext', 2),
BarIssue('test', 'test message', 'test/file2.ext', 2, 5),
BarIssue('test2', 'test message', 'test/file2.ext', 2, 5),
FooIssue('test', 'test message', 'test/file2.ext', 2, 5),
FooIssue('test2', 'test message', 'test/file2.ext', 2, 5),
BarIssue('test', 'test message', 'test/file2.ext', 5, 3),
BarIssue('test2', 'test message', 'test/file2.ext', 5, 3),
FooIssue('test', 'test message', 'test/file2.ext', 5, 3),
FooIssue('test2', 'test message', 'test/file2.ext', 5, 3),
],
}
shuffled = []
for issues in expected.values():
shuffled += issues
random.shuffle(shuffled)
collector.add_issues(shuffled)
assert collector.get_grouped_issues() == expected
assert collector.issue_count() == len(shuffled)
assert collector.issue_count(include_unclean=True) == len(shuffled)
def test_get_grouped_issues_custom():
collector = Collector(get_default_config())
expected = {
'test': [
BarIssue('test', 'test message', 'test/file.ext', 2),
FooIssue('test', 'test message', 'test/file.ext', 2),
],
'test2': [
BarIssue('test2', 'test message', 'test/file.ext', 2),
BarIssue('test2', 'test message', 'test/file.ext', 5),
FooIssue('test2', 'test message', 'test/file.ext', 2),
FooIssue('test2', 'test message', 'test/file.ext', 5),
],
}
shuffled = []
for issues in expected.values():
shuffled += issues
random.shuffle(shuffled)
collector.add_issues(shuffled)
assert collector.get_grouped_issues(lambda x: x.code, ('tool', 'line')) == expected
assert collector.issue_count() == len(shuffled)
assert collector.issue_count(include_unclean=True) == len(shuffled)
def test_disabled():
cfg = get_default_config()
cfg['disabled'] = ['test']
collector = Collector(cfg)
issues = [
TidyPyIssue('test', 'test message', 'test/file.ext', 2),
TidyPyIssue('test2', 'test message', 'test/file.ext', 2),
TidyPyIssue('test3', 'test message', 'test/file.ext', 2),
]
collector.add_issues(issues)
assert collector.get_issues() == issues[1:]
assert collector.issue_count() == 2
assert collector.issue_count(include_unclean=True) == 3
def test_merging_dupes():
collector = Collector(get_default_config())
issues = [
TidyPyIssue('test', 'test message', 'test/file.ext', 2),
TidyPyIssue('test', 'test message', 'test/file.ext', 2),
TidyPyIssue('test2', 'test message', 'test/file.ext', 2),
TidyPyIssue('test', 'test message', 'test/file.ext', 3),
TidyPyIssue('test2', 'test message', 'test/file2.ext', 2),
]
collector.add_issues(issues)
assert collector.get_issues() == [issues[0]] + issues[2:]
assert collector.issue_count() == 4
assert collector.issue_count(include_unclean=True) == 5
def test_noqa(tmpdir):
project_dir = tmpdir.mkdir('noqa')
py_file = project_dir.join('file.py')
py_file.write('\nsomething # noqa: test1\n\n# NoQA\n\n\n\n# NOqa: tidypy:test6,foobar,@bar')
yaml_file = project_dir.join('file.yaml')
cfg = get_default_config()
cfg['merge-issues'] = False
collector = Collector(cfg)
good_issues = [
TidyPyIssue('test', 'test message', str(py_file), 2),
TidyPyIssue('test4', 'test message', str(py_file), 6),
TidyPyIssue('test5', 'test message', str(yaml_file), 2),
TidyPyIssue('test7', 'test message', str(py_file), 8),
BarIssue('test8', 'test message', str(py_file), 7),
]
filtered_issues = [
TidyPyIssue('test1', 'test message', str(py_file), 2),
TidyPyIssue('test2', 'test message', str(py_file), 4),
TidyPyIssue('test3', 'test message', str(py_file), 4),
TidyPyIssue('test6', 'test message', str(py_file), 8),
BarIssue('test9', 'test message', str(py_file), 8),
]
collector.add_issues(good_issues)
collector.add_issues(filtered_issues)
assert good_issues == collector.get_issues(sortby=collector.NO_SORT)
assert collector.issue_count() == len(good_issues)
assert collector.issue_count(include_unclean=True) == len(good_issues + filtered_issues)
def test_noqa_disabled(tmpdir):
    """When ``cfg['noqa']`` is False, pragma comments filter nothing."""
    root = tmpdir.mkdir('noqa')
    source = root.join('file.py')
    source.write('\nsomething # noqa: test1\n\n# NoQA\n\n\n\n# NOqa: tidypy:test6,foobar,@bar')
    other = root.join('file.yaml')

    cfg = get_default_config()
    cfg['merge-issues'] = False
    cfg['noqa'] = False  # disable pragma handling entirely
    collector = Collector(cfg)

    kept = [
        TidyPyIssue('test', 'test message', str(source), 2),
        TidyPyIssue('test4', 'test message', str(source), 6),
        TidyPyIssue('test5', 'test message', str(other), 2),
        TidyPyIssue('test7', 'test message', str(source), 8),
        BarIssue('test8', 'test message', str(source), 7),
    ]
    would_be_suppressed = [
        TidyPyIssue('test1', 'test message', str(source), 2),
        TidyPyIssue('test2', 'test message', str(source), 4),
        TidyPyIssue('test3', 'test message', str(source), 4),
        TidyPyIssue('test6', 'test message', str(source), 8),
        BarIssue('test9', 'test message', str(source), 8),
    ]
    collector.add_issues(kept)
    collector.add_issues(would_be_suppressed)

    # Nothing is filtered: every issue comes back, in insertion order.
    everything = kept + would_be_suppressed
    assert everything == collector.get_issues(sortby=collector.NO_SORT)
    assert collector.issue_count() == len(everything)
    assert collector.issue_count(include_unclean=True) == len(everything)
| 39.590747
| 97
| 0.609978
| 1,378
| 11,125
| 4.811321
| 0.063135
| 0.160935
| 0.169683
| 0.143288
| 0.904223
| 0.855656
| 0.851885
| 0.823529
| 0.811765
| 0.800302
| 0
| 0.027892
| 0.213663
| 11,125
| 280
| 98
| 39.732143
| 0.729995
| 0
| 0
| 0.722944
| 0
| 0.008658
| 0.270227
| 0.004315
| 0
| 0
| 0
| 0
| 0.125541
| 1
| 0.047619
| false
| 0
| 0.008658
| 0
| 0.073593
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f68cd47241ac64a6443eccd3f3f9b8f20163fb9e
| 1,825
|
py
|
Python
|
modules/2.79/bpy/ops/surface.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/bpy/ops/surface.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/bpy/ops/surface.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
def primitive_nurbs_surface_circle_add(radius=1.0, view_align=False, enter_editmode=False, location=(0.0, 0.0, 0.0), rotation=(0.0, 0.0, 0.0), layers=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)):
    """Signature stub for ``bpy.ops.surface.primitive_nurbs_surface_circle_add``.

    Does nothing and returns None; the real operator is implemented inside
    Blender. ``layers`` holds one flag per scene layer (20 total).
    """
    pass
def primitive_nurbs_surface_curve_add(radius=1.0, view_align=False, enter_editmode=False, location=(0.0, 0.0, 0.0), rotation=(0.0, 0.0, 0.0), layers=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)):
    """Signature stub for ``bpy.ops.surface.primitive_nurbs_surface_curve_add``.

    Does nothing and returns None; see the module note above about stubs.
    """
    pass
def primitive_nurbs_surface_cylinder_add(radius=1.0, view_align=False, enter_editmode=False, location=(0.0, 0.0, 0.0), rotation=(0.0, 0.0, 0.0), layers=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)):
    """Signature stub for ``bpy.ops.surface.primitive_nurbs_surface_cylinder_add``.

    Does nothing and returns None.
    """
    pass
def primitive_nurbs_surface_sphere_add(radius=1.0, view_align=False, enter_editmode=False, location=(0.0, 0.0, 0.0), rotation=(0.0, 0.0, 0.0), layers=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)):
    """Signature stub for ``bpy.ops.surface.primitive_nurbs_surface_sphere_add``.

    Does nothing and returns None.
    """
    pass
def primitive_nurbs_surface_surface_add(radius=1.0, view_align=False, enter_editmode=False, location=(0.0, 0.0, 0.0), rotation=(0.0, 0.0, 0.0), layers=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)):
    """Signature stub for ``bpy.ops.surface.primitive_nurbs_surface_surface_add``.

    Does nothing and returns None.
    """
    pass
def primitive_nurbs_surface_torus_add(radius=1.0, view_align=False, enter_editmode=False, location=(0.0, 0.0, 0.0), rotation=(0.0, 0.0, 0.0), layers=(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)):
    """Signature stub for ``bpy.ops.surface.primitive_nurbs_surface_torus_add``.

    Does nothing and returns None.
    """
    pass
| 73
| 294
| 0.713973
| 306
| 1,825
| 4.140523
| 0.071895
| 0.899763
| 1.278611
| 1.610103
| 0.951855
| 0.951855
| 0.951855
| 0.951855
| 0.951855
| 0.951855
| 0
| 0.052402
| 0.121644
| 1,825
| 24
| 295
| 76.041667
| 0.737991
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 11
|
f6b1678c5b1c3fefa835361123cedd903b12d79c
| 20,301
|
py
|
Python
|
tests/test_util.py
|
mritv/edflow
|
9bef77e32a74d9ee4108182d8cab177b5dd36731
|
[
"MIT"
] | 1
|
2020-04-01T07:41:21.000Z
|
2020-04-01T07:41:21.000Z
|
tests/test_util.py
|
mritv/edflow
|
9bef77e32a74d9ee4108182d8cab177b5dd36731
|
[
"MIT"
] | null | null | null |
tests/test_util.py
|
mritv/edflow
|
9bef77e32a74d9ee4108182d8cab177b5dd36731
|
[
"MIT"
] | null | null | null |
import pytest
import copy
from edflow.util import (
set_value,
retrieve,
walk,
set_default,
contains_key,
KeyNotFoundError,
get_leaf_names,
)
from edflow import util
from itertools import product
# ================= set_value ====================
def test_pop_value_from_key():
    """pop_value_from_key removes a top-level key and returns its value."""
    data = {"a": [1, 2]}
    assert util.pop_value_from_key(data, "a") == [1, 2]
def pytest_generate_tests(metafunc):
    """Parametrize class-based tests from their ``params`` table.

    pytest calls this hook once per collected test; for tests living on a
    class, the case list is looked up by test-function name.
    See http://doc.pytest.org/en/latest/example/parametrize.html
    """
    if metafunc.cls is None:
        return
    cases = metafunc.cls.params[metafunc.function.__name__]
    metafunc.parametrize(metafunc.cls.argnames, cases)
def make_collection():
    """Return a fresh nested dict/list fixture for the keypath tests.

    A new object is built on every call so tests that mutate it stay
    independent of one another.
    """
    return {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
class Test_pop_keypath:
    """Table-driven tests for ``util.pop_keypath``.

    ``pytest_generate_tests`` parametrizes each method from the matching
    entry in ``params``; every case is ``(collection, key, expected_value)``.
    """

    argnames = ("collection", "key", "expected_value")
    params = {
        # existing keypaths: popped value is returned
        "test_pop_keypath": [
            (make_collection(), "a", [1, 2]),
            (make_collection(), "b/c/d", 1),
            (make_collection(), "a/0", 1),
        ],
        # missing keypaths fall back to the supplied default
        "test_default": [
            (make_collection(), "f", "abc"),
            (make_collection(), "a/4", "abc"),
            (make_collection(), "b/c/e", "abc"),
        ],
        # missing keypaths raise when no default is given
        # (expected_value is unused by those tests)
        "test_raise_keyNotFoundError": [
            (make_collection(), "f", None),
            (make_collection(), "a/4", None),
            (make_collection(), "b/c/e", None),
        ],
        # pass_success=True wraps the result as (value, success_flag)
        "test_pass_success": [
            (make_collection(), "f", ("abc", False)),
            (make_collection(), "a/4", ("abc", False)),
            (make_collection(), "b/c/e", ("abc", False)),
            (make_collection(), "a", ([1, 2], True)),
            (make_collection(), "a/0", (1, True)),
            (make_collection(), "b/c/d", (1, True)),
        ],
        "test_raise_keyNotFoundError_pass_success": [
            (make_collection(), "f", None),
            (make_collection(), "a/4", None),
            (make_collection(), "b/c/e", None),
        ],
        "test_pass_sucess_default": [
            (make_collection(), "a", ([1, 2], True)),
            (make_collection(), "a/0", (1, True)),
            (make_collection(), "b/c/d", (1, True)),
        ],
    }

    def test_pop_keypath(self, collection, key, expected_value):
        """Popping an existing keypath returns its value."""
        popped_value = util.pop_keypath(collection, key)
        assert expected_value == popped_value

    def test_default(self, collection, key, expected_value):
        """A missing keypath yields the provided default."""
        popped_value = util.pop_keypath(collection, key, default="abc")
        assert expected_value == popped_value

    def test_raise_keyNotFoundError(self, collection, key, expected_value):
        """A missing keypath with no default raises KeyNotFoundError."""
        with pytest.raises(KeyNotFoundError) as exc_info:
            util.pop_keypath(collection, key)

    def test_pass_success(self, collection, key, expected_value):
        """pass_success=True returns (value, success) tuples."""
        popped_value = util.pop_keypath(
            collection, key, default="abc", pass_success=True
        )
        assert expected_value == popped_value

    def test_raise_keyNotFoundError_pass_success(self, collection, key, expected_value):
        """pass_success does not suppress the error when no default is given."""
        with pytest.raises(KeyNotFoundError) as exc_info:
            util.pop_keypath(collection, key, pass_success=True)

    def test_pass_sucess_default(self, collection, key, expected_value):
        """Existing keys with both default and pass_success report success."""
        popped_value = util.pop_keypath(
            collection, key, default="abc", pass_success=True
        )
        assert expected_value == popped_value
def test_keyNotFoundError():
    """KeyNotFoundError can be raised directly or chained from a KeyError."""
    with pytest.raises(KeyNotFoundError):
        raise KeyNotFoundError("test")

    with pytest.raises(KeyNotFoundError):
        try:
            {"a": "b"}.pop("c")
        except (KeyError, IndexError) as err:
            raise KeyNotFoundError(err)
def test_set_value_fail():
    """A non-integer key into a list raises."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}, "e": 2}}
    with pytest.raises(Exception):
        set_value(data, "a/g", 3)  # should raise


def test_set_value():
    """set_value replaces leaves addressed by slash-separated key paths."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}, "e": 2}}

    set_value(data, "a/0", 3)
    assert data == {"a": [3, 2], "b": {"c": {"d": 1}, "e": 2}}

    set_value(data, "b/e", 3)
    assert data == {"a": [3, 2], "b": {"c": {"d": 1}, "e": 3}}

    # a scalar list entry is overwritten by a new dict when keyed into
    set_value(data, "a/1/f", 3)
    assert data == {"a": [3, {"f": 3}], "b": {"c": {"d": 1}, "e": 3}}


def test_append_to_list():
    """Writing past the end of a list grows it, padding gaps with None."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}, "e": 2}}

    set_value(data, "a/2", 3)
    assert data == {"a": [1, 2, 3], "b": {"c": {"d": 1}, "e": 2}}

    set_value(data, "a/5", 6)
    assert data == {"a": [1, 2, 3, None, None, 6], "b": {"c": {"d": 1}, "e": 2}}


def test_add_key():
    """New keys are inserted, including non-string keys on nested dicts."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}, "e": 2}}

    set_value(data, "f", 3)
    assert data == {"a": [1, 2], "b": {"c": {"d": 1}, "e": 2}, "f": 3}

    set_value(data, "b/1", 3)
    assert data == {"a": [1, 2], "b": {"c": {"d": 1}, "e": 2, 1: 3}, "f": 3}


def test_fancy_overwriting():
    """Scalars are replaced by dicts/lists when keyed deeper than they go."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}

    set_value(data, "e/f", 3)
    assert data == {"a": [1, 2], "b": {"c": {"d": 1}}, "e": {"f": 3}}

    set_value(data, "e/f/1/g", 3)
    assert data == {"a": [1, 2], "b": {"c": {"d": 1}}, "e": {"f": [None, {"g": 3}]}}


def test_top_is_dict():
    """Whole subtrees can be created from scratch below a dict root."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}

    set_value(data, "h", 4)
    assert data == {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2, "h": 4}

    set_value(data, "i/j/k", 4)
    assert data == {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2, "h": 4, "i": {"j": {"k": 4}}}

    # an integer path segment creates a list instead of a dict
    set_value(data, "j/0/k", 4)
    assert data == {
        "a": [1, 2],
        "b": {"c": {"d": 1}},
        "e": 2,
        "h": 4,
        "i": {"j": {"k": 4}},
        "j": [{"k": 4}],
    }


def test_top_is_list():
    """The collection root may itself be a list."""
    data = [{"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}, 2, 3]

    set_value(data, "0/k", 4)
    assert data == [{"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2, "k": 4}, 2, 3]

    set_value(data, "0", 1)
    assert data == [1, 2, 3]
# ==================== retrieve ==================
def test_retrieve():
    """retrieve reads leaves addressed by slash-separated key paths."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
    assert retrieve(data, "a") == [1, 2]
    assert retrieve(data, "a/0") == 1
    assert retrieve(data, "b/c/d") == 1


def test_retrieve_default():
    """Missing keypaths fall back to the supplied default."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
    for key in ("f", "a/4", "b/c/e"):
        assert retrieve(data, key, default="abc") == "abc"


def test_retrieve_fail():
    """Missing keypaths raise when no default is given."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
    for key in ("f", "a/4", "b/c/e"):
        with pytest.raises(Exception):
            retrieve(data, key)


def test_retrieve_pass_success():
    """pass_success=True wraps the result as (value, found_flag)."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
    for key in ("f", "a/4", "b/c/e"):
        assert retrieve(data, key, default="abc", pass_success=True) == ("abc", False)
    assert retrieve(data, "a", default="abc", pass_success=True) == ([1, 2], True)
    assert retrieve(data, "a/0", default="abc", pass_success=True) == (1, True)
    assert retrieve(data, "b/c/d", default="abc", pass_success=True) == (1, True)


def test_retrieve_pass_success_fail():
    """pass_success without a default still raises on missing keys."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
    for key in ("f", "a/4", "b/c/e"):
        with pytest.raises(Exception):
            retrieve(data, key, pass_success=True)
    assert retrieve(data, "a", pass_success=True) == ([1, 2], True)
    assert retrieve(data, "a/0", pass_success=True) == (1, True)
    assert retrieve(data, "b/c/d", pass_success=True) == (1, True)
# -------------------- retrieve with expand=False ------------------
def test_retrieve_ef():
    """Plain lookups behave the same with expand=False on a static dict."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
    assert retrieve(data, "a", expand=False) == [1, 2]
    assert retrieve(data, "a/0", expand=False) == 1
    assert retrieve(data, "b/c/d", expand=False) == 1


def test_retrieve_default_ef():
    """Defaults also apply with expand=False."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
    for key in ("f", "a/4", "b/c/e"):
        assert retrieve(data, key, default="abc", expand=False) == "abc"


def test_retrieve_fail_ef():
    """Missing keys raise with expand=False and no default."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
    for key in ("f", "a/4", "b/c/e"):
        with pytest.raises(Exception):
            retrieve(data, key, expand=False)


def test_retrieve_pass_success_ef():
    """pass_success tuples are unchanged by expand=False on a static dict."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
    for key in ("f", "a/4", "b/c/e"):
        assert retrieve(data, key, default="abc", pass_success=True, expand=False) == ("abc", False)
    assert retrieve(data, "a", default="abc", pass_success=True, expand=False) == ([1, 2], True)
    assert retrieve(data, "a/0", default="abc", pass_success=True, expand=False) == (1, True)
    assert retrieve(data, "b/c/d", default="abc", pass_success=True, expand=False) == (1, True)


def test_retrieve_pass_success_fail_ef():
    """pass_success + expand=False without a default still raises."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
    for key in ("f", "a/4", "b/c/e"):
        with pytest.raises(Exception):
            retrieve(data, key, pass_success=True, expand=False)
    assert retrieve(data, "a", pass_success=True, expand=False) == ([1, 2], True)
    assert retrieve(data, "a/0", pass_success=True, expand=False) == (1, True)
    assert retrieve(data, "b/c/d", pass_success=True, expand=False) == (1, True)
# -------------------- retrieve with callable leaves ------------------
def nested_leave():
    """Innermost callable leaf; expands to a plain dict."""
    return dict(d=1)


def callable_leave():
    """Callable leaf whose expansion contains another callable leaf."""
    return dict(c=nested_leave)
def test_retrieve_callable():
    """Callable leaves are expanded transparently during lookup."""
    data = {"a": [1, 2], "b": callable_leave, "e": 2}
    assert retrieve(data, "a") == [1, 2]
    assert retrieve(data, "a/0") == 1
    assert retrieve(data, "b/c/d") == 1
    # the expansion replaced the callable inside the collection itself
    assert data == {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}


def test_retrieve_default_callable():
    """Defaults apply to missing keys even through a callable leaf."""
    data = {"a": [1, 2], "b": callable_leave, "e": 2}
    for key in ("f", "a/4", "b/c/e"):
        assert retrieve(data, key, default="abc") == "abc"


def test_retrieve_fail_callable():
    """Missing keys raise, with or without a callable in the path."""
    data = {"a": [1, 2], "b": callable_leave, "e": 2}
    for key in ("f", "a/4", "b/c/e"):
        with pytest.raises(Exception):
            retrieve(data, key)


def test_retrieve_pass_success_callable():
    """pass_success tuples work through callable leaves."""
    data = {"a": [1, 2], "b": callable_leave, "e": 2}
    for key in ("f", "a/4", "b/c/e"):
        assert retrieve(data, key, default="abc", pass_success=True) == ("abc", False)
    assert retrieve(data, "a", default="abc", pass_success=True) == ([1, 2], True)
    assert retrieve(data, "a/0", default="abc", pass_success=True) == (1, True)
    assert retrieve(data, "b/c/d", default="abc", pass_success=True) == (1, True)


def test_retrieve_pass_success_fail_callable():
    """pass_success without a default still raises through callables."""
    data = {"a": [1, 2], "b": callable_leave, "e": 2}
    for key in ("f", "a/4", "b/c/e"):
        with pytest.raises(Exception):
            retrieve(data, key, pass_success=True)
    assert retrieve(data, "a", pass_success=True) == ([1, 2], True)
    assert retrieve(data, "a/0", pass_success=True) == (1, True)
    assert retrieve(data, "b/c/d", pass_success=True) == (1, True)
# -------------------- retrieve with callable and expand=False ------------------
def test_retrieve_ef_callable():
    """With expand=False, descending into a callable leaf fails."""
    data = {"a": [1, 2], "b": callable_leave, "e": 2}
    assert retrieve(data, "a", expand=False) == [1, 2]
    assert retrieve(data, "a/0", expand=False) == 1
    with pytest.raises(Exception):
        retrieve(data, "b/c/d", expand=False)


def test_retrieve_default_ef_callable():
    """Defaults cover both missing keys and unexpandable callables."""
    data = {"a": [1, 2], "b": callable_leave, "e": 2}
    for key in ("f", "a/4", "b/c/e"):
        assert retrieve(data, key, default="abc", expand=False) == "abc"


def test_retrieve_fail_ef_callable():
    """Missing keys raise with expand=False and a callable present."""
    data = {"a": [1, 2], "b": callable_leave, "e": 2}
    for key in ("f", "a/4", "b/c/e"):
        with pytest.raises(Exception):
            retrieve(data, key, expand=False)


def test_retrieve_pass_success_ef_callable():
    """pass_success reports failure for paths through an unexpanded callable."""
    data = {"a": [1, 2], "b": callable_leave, "e": 2}
    for key in ("f", "a/4", "b/c/e"):
        assert retrieve(data, key, default="abc", pass_success=True, expand=False) == ("abc", False)
    assert retrieve(data, "a", default="abc", pass_success=True, expand=False) == ([1, 2], True)
    assert retrieve(data, "a/0", default="abc", pass_success=True, expand=False) == (1, True)
    # the callable is not expanded, so the default is returned
    assert retrieve(data, "b/c/d", default="abc", pass_success=True, expand=False) == ("abc", False)


def test_retrieve_pass_success_fail_ef_callable():
    """Without a default, unexpandable paths raise even with pass_success."""
    data = {"a": [1, 2], "b": callable_leave, "e": 2}
    for key in ("f", "a/4", "b/c/e"):
        with pytest.raises(Exception):
            retrieve(data, key, pass_success=True, expand=False)
    assert retrieve(data, "a", pass_success=True, expand=False) == ([1, 2], True)
    assert retrieve(data, "a/0", pass_success=True, expand=False) == (1, True)
    with pytest.raises(Exception):
        retrieve(data, "b/c/d", pass_success=True, expand=False)
def failing_leave():
    """Callable leaf that always raises a generic Exception.

    The original version had an unreachable ``return`` after the raise;
    it has been removed (dead code).
    """
    raise Exception()


class CustomException(Exception):
    """Marker exception used to verify that user errors propagate."""


def custom_leave():
    """Callable leaf that always raises a user-defined exception type.

    The unreachable ``return`` after the raise was removed (dead code).
    """
    raise CustomException()
def test_retrieve_propagates_exception():
    """Errors raised inside a callable leaf are not masked by `default`."""
    with pytest.raises(Exception):
        retrieve({"a": [1, 2], "b": failing_leave, "e": 2}, "b/c/d", default=0)
    with pytest.raises(CustomException):
        retrieve({"a": [1, 2], "b": custom_leave, "e": 2}, "b/c/d", default=0)


def test_retrieve_callable_leaves():
    """Expansion results are both returned and written back in place."""
    data = {"a": [1, 2], "b": callable_leave, "e": 2}
    assert retrieve(data, "b") == callable_leave()
    assert data["b"] == callable_leave()

    data = {"a": [1, 2], "b": callable_leave, "e": 2}
    assert retrieve(data, "b/c") == nested_leave()
    assert data["b"]["c"] == nested_leave()

    data = {"a": [1, 2], "b": callable_leave, "e": 2}
    assert retrieve(data, "b/c/d") == 1
# ====================== walk ====================
def test_walk():
    """walk maps a function over every leaf and returns the new tree."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
    negated = {"a": [-1, -2], "b": {"c": {"d": -1}}, "e": -2}
    assert walk(data, lambda leaf: -leaf) == negated


def test_walk_inplace():
    """inplace=True mutates the collection instead of returning a copy."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
    negated = {"a": [-1, -2], "b": {"c": {"d": -1}}, "e": -2}
    walk(data, lambda leaf: -leaf, inplace=True)
    assert data == negated


def test_walk_pass_key():
    """pass_key=True feeds the leaf's keypath as a first argument."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
    negated = {"a": [-1, -2], "b": {"c": {"d": -1}}, "e": -2}
    assert walk(data, lambda key, leaf: -leaf, pass_key=True) == negated


def test_walk_pass_key_inplace():
    """pass_key and inplace compose."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
    negated = {"a": [-1, -2], "b": {"c": {"d": -1}}, "e": -2}
    walk(data, lambda key, leaf: -leaf, inplace=True, pass_key=True)
    assert data == negated
# =================== set_default ================
def test_set_default_key_contained():
    """An existing key is returned untouched."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
    value = set_default(data, "a", "new")
    assert value == [1, 2]
    assert data == {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}


def test_set_default_key_not_contained():
    """A missing key is inserted with the default, which is returned."""
    data = {"b": {"c": {"d": 1}}, "e": 2}
    value = set_default(data, "a", "new")
    assert value == "new"
    assert data == {"a": "new", "b": {"c": {"d": 1}}, "e": 2}
# =================== set_default ================
def test_contains_key():
    """contains_key reports existence of slash-separated key paths."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
    assert contains_key(data, "a")
    assert contains_key(data, "b/c/d")
    assert not contains_key(data, "b/c/f")
    assert not contains_key(data, "f")


def test_contains_key_callable():
    """expand=True expands callable leaves while probing; expand=False doesn't."""
    def fresh():
        return {"a": [1, 2], "b": callable_leave, "e": 2}

    data = fresh()
    assert contains_key(data, "a", expand=True)
    data = fresh()  # reset
    assert contains_key(data, "a", expand=False)
    assert contains_key(data, "b/c/d", expand=True)
    assert contains_key(data, "b/c/d", expand=False)  # now its expanded
    data = fresh()  # reset
    assert not contains_key(data, "b/c/d", expand=False)
    assert not contains_key(data, "b/c/f", expand=True)
    data = fresh()  # reset
    assert not contains_key(data, "b/c/f", expand=False)
    assert not contains_key(data, "f", expand=True)
    data = fresh()  # reset
    assert not contains_key(data, "f", expand=False)
# ================== leaf names ==================
def test_get_leaf_name():
    """get_leaf_names yields one keypath per leaf of the collection."""
    data = {"a": [1, 2], "b": {"c": {"d": 1}}, "e": 2}
    assert sorted(get_leaf_names(data)) == sorted(["a/0", "a/1", "b/c/d", "e"])
| 26.676741
| 88
| 0.538102
| 2,918
| 20,301
| 3.625771
| 0.045236
| 0.035917
| 0.10189
| 0.022306
| 0.854726
| 0.823724
| 0.786011
| 0.729301
| 0.708696
| 0.686578
| 0
| 0.025402
| 0.245653
| 20,301
| 760
| 89
| 26.711842
| 0.66547
| 0.040392
| 0
| 0.636542
| 0
| 0
| 0.056429
| 0.004677
| 0
| 0
| 0
| 0
| 0.206287
| 1
| 0.11002
| false
| 0.137525
| 0.009823
| 0.011788
| 0.145383
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
f6f1715210211ba1b5391df6762cd3fcc2823895
| 5,115
|
py
|
Python
|
coleta/riotApi.py
|
LucianoBatista/league_clustering
|
d2a5f54f69335822b9ca777fb44eb278b84082a3
|
[
"Apache-2.0"
] | 6
|
2020-04-19T01:27:56.000Z
|
2020-05-02T02:05:23.000Z
|
coleta/riotApi.py
|
LucianoBatista/league_clustering
|
d2a5f54f69335822b9ca777fb44eb278b84082a3
|
[
"Apache-2.0"
] | null | null | null |
coleta/riotApi.py
|
LucianoBatista/league_clustering
|
d2a5f54f69335822b9ca777fb44eb278b84082a3
|
[
"Apache-2.0"
] | null | null | null |
import RiotConsts as consts
import requests
class GetSummoner(object):
    """Client for the Riot summoner-by-name endpoint."""

    def __init__(self, api_key, region=consts.REGION['brazil']):
        self.api_key = api_key
        self.region = region

    def _request(self, api_url, params=None):
        """GET `api_url` with the API key merged into `params`; return parsed JSON.

        `params` entries never override the api_key. The mutable default
        argument (`params={}`) was replaced with None to avoid the shared
        mutable default pitfall.
        """
        args = {'api_key': self.api_key}
        for key, value in (params or {}).items():
            if key not in args:
                args[key] = value
        response = requests.get(
            consts.URL['base_summoner'].format(
                region=self.region,
                url=api_url,
            ),
            params=args
        )
        return response.json()

    def get_summoner_by_name(self, var):
        """Look up a summoner by name; `var` is interpolated into the URL."""
        api_url = consts.URL['summoner_by_name'].format(
            version=consts.API_VERSIONS['summoner'],
            vars=var
        )
        return self._request(api_url)
class Getmatchlist(object):
    """Client for the Riot match-list endpoint."""

    def __init__(self, api_key, region=consts.REGION['brazil']):
        self.api_key = api_key
        self.region = region

    def _request(self, api_url, params=None):
        """GET `api_url` with the API key merged into `params`; return parsed JSON.

        Mutable default `params={}` replaced with None (shared-default pitfall).
        """
        args = {'api_key': self.api_key}
        for key, value in (params or {}).items():
            if key not in args:
                args[key] = value
        response = requests.get(
            consts.URL['base_matchlist'].format(
                region=self.region,
                url=api_url
            ),
            params=args
        )
        return response.json()

    def get_summoner_account(self, var):
        """Fetch the match list for the account id in `var`."""
        api_url = consts.URL['match_list'].format(
            version=consts.API_VERSIONS['summoner'],
            vars=var
        )
        return self._request(api_url)
class GetmatchStats(object):
    """Client for the Riot match-detail endpoint."""

    def __init__(self, api_key, region=consts.REGION['brazil']):
        self.api_key = api_key
        self.region = region

    def _request(self, api_url, params=None):
        """GET `api_url` with the API key merged into `params`; return parsed JSON.

        Mutable default `params={}` replaced with None (shared-default pitfall).
        """
        args = {'api_key': self.api_key}
        for key, value in (params or {}).items():
            if key not in args:
                args[key] = value
        response = requests.get(
            consts.URL['base_match'].format(
                region=self.region,
                url=api_url
            ),
            params=args
        )
        return response.json()

    def get_match_stats(self, var):
        """Fetch full statistics for the match id in `var`."""
        api_url = consts.URL['match_stats'].format(
            version=consts.API_VERSIONS['summoner'],
            vars=var
        )
        return self._request(api_url)
class GetmatchPerChamp(object):
    """Client for the Riot match-list endpoint filtered by champion."""

    def __init__(self, api_key, region=consts.REGION['brazil']):
        self.api_key = api_key
        self.region = region

    def _request(self, api_url, params=None):
        """GET `api_url` with the API key merged into `params`; return parsed JSON.

        Mutable default `params={}` replaced with None (shared-default pitfall).
        """
        args = {'api_key': self.api_key}
        for key, value in (params or {}).items():
            if key not in args:
                args[key] = value
        response = requests.get(
            consts.URL['base_matchPerChamp'].format(
                region=self.region,
                url=api_url
            ),
            params=args
        )
        return response.json()

    def get_summoner_accountPerChamp(self, var, champ):
        """Fetch matches for account `var` restricted to champion `champ`."""
        api_url = consts.URL['matchPerChamp'].format(
            version=consts.API_VERSIONS['summoner'],
            vars=var,
            champion=champ
        )
        return self._request(api_url)
class GetmatchPerqueue(object):
    """Client for the Riot match-list endpoint filtered by queue."""

    def __init__(self, api_key, region=consts.REGION['brazil']):
        self.api_key = api_key
        self.region = region

    def _request(self, api_url, params=None):
        """GET `api_url` with the API key merged into `params`; return parsed JSON.

        Mutable default `params={}` replaced with None (shared-default pitfall).
        """
        args = {'api_key': self.api_key}
        for key, value in (params or {}).items():
            if key not in args:
                args[key] = value
        response = requests.get(
            consts.URL['base_matchPerqueue'].format(
                region=self.region,
                url=api_url
            ),
            params=args
        )
        return response.json()

    def get_summoner_accountPerqueue(self, var, queue):
        """Fetch matches for account `var` restricted to queue `queue`."""
        api_url = consts.URL['matchPerqueue'].format(
            version=consts.API_VERSIONS['summoner'],
            vars=var,
            queue=queue
        )
        return self._request(api_url)
class GetTierbysummoner(object):
    """Client for the Riot league/tier endpoint."""

    def __init__(self, api_key, region=consts.REGION['brazil']):
        self.api_key = api_key
        self.region = region

    def _request(self, api_url, params=None):
        """GET `api_url` with the API key merged into `params`; return parsed JSON.

        Mutable default `params={}` replaced with None (shared-default pitfall).
        """
        args = {'api_key': self.api_key}
        for key, value in (params or {}).items():
            if key not in args:
                args[key] = value
        response = requests.get(
            consts.URL['base_tier'].format(
                region=self.region,
                url=api_url
            ),
            params=args
        )
        return response.json()

    def get_summonerTier_by_id(self, var):
        """Fetch the ranked tier for the summoner id in `var`."""
        api_url = consts.URL['tier'].format(
            version=consts.API_VERSIONS['summoner'],
            vars=var
        )
        return self._request(api_url)
| 28.898305
| 64
| 0.545455
| 570
| 5,115
| 4.684211
| 0.103509
| 0.067416
| 0.067416
| 0.07191
| 0.858052
| 0.858052
| 0.820599
| 0.800375
| 0.766667
| 0.766667
| 0
| 0
| 0.346432
| 5,115
| 176
| 65
| 29.0625
| 0.798684
| 0.023265
| 0
| 0.711268
| 0
| 0
| 0.055121
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.126761
| false
| 0
| 0.014085
| 0
| 0.267606
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
10090c34b1b434c64f944dc4806dd8e09c58ebca
| 33,378
|
py
|
Python
|
genemethods/assemblypipeline/reporter.py
|
OLC-LOC-Bioinformatics/genemethods
|
cc47e5a18b6704b253975fae4fa8761d9836bee2
|
[
"MIT"
] | 1
|
2020-01-15T18:41:21.000Z
|
2020-01-15T18:41:21.000Z
|
genemethods/assemblypipeline/reporter.py
|
OLC-LOC-Bioinformatics/genemethods
|
cc47e5a18b6704b253975fae4fa8761d9836bee2
|
[
"MIT"
] | null | null | null |
genemethods/assemblypipeline/reporter.py
|
OLC-LOC-Bioinformatics/genemethods
|
cc47e5a18b6704b253975fae4fa8761d9836bee2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from olctools.accessoryFunctions.accessoryFunctions import GenObject
from datetime import datetime
import logging
import os
__author__ = 'adamkoziol'
class Reporter(object):
def metadata_reporter(self):
    """
    Creates the metadata report (combinedMetadata.csv in self.reportpath)
    by pulling specific attributes from the metadata objects.

    One comma-separated row is written per sample, in the column order
    declared by self.headers. Missing attributes are rendered by
    GenObject.returnattr (which appends the trailing comma); any 'NA'
    text in the assembled report is replaced with 'ND' before writing.
    """
    logging.info('Creating summary report')
    header = '{}\n'.format(','.join(self.headers))
    # Create a string to store all the results
    data = str()
    for sample in self.metadata:
        # Add the value of the appropriate attribute to the results string
        data += GenObject.returnattr(sample, 'name')
        # SampleName
        data += GenObject.returnattr(sample.run, 'SamplePlate')
        # Genus
        data += GenObject.returnattr(sample.general, 'closestrefseqgenus')
        # SamplePurity
        data += GenObject.returnattr(sample.confindr, 'num_contaminated_snvs')
        # N50
        n50 = GenObject.returnattr(sample.quast, 'N50',
                                   number=True)
        # '-,' indicates a missing N50 value; report 0 instead
        if n50 != '-,':
            data += n50
        else:
            data += '0,'
        # NumContigs
        data += GenObject.returnattr(sample.quast, 'num_contigs',
                                     number=True)
        # TotalLength
        data += GenObject.returnattr(sample.quast, 'Total_length',
                                     number=True)
        # MeanInsertSize
        data += GenObject.returnattr(sample.quast, 'mean_insert',
                                     number=True)
        # InsertSizeSTD
        data += GenObject.returnattr(sample.quast, 'std_insert',
                                     number=True)
        # AverageCoverageDepth
        data += GenObject.returnattr(sample.qualimap, 'MeanCoveragedata',
                                     number=True)
        # CoverageDepthSTD
        data += GenObject.returnattr(sample.qualimap, 'StdCoveragedata',
                                     number=True)
        # PercentGC
        data += GenObject.returnattr(sample.quast, 'GC',
                                     number=True)
        # MASH_ReferenceGenome
        data += GenObject.returnattr(sample.mash, 'closestrefseq')
        # MASH_NumMatchingHashes
        data += GenObject.returnattr(sample.mash, 'nummatches')
        # 16S_result
        data += GenObject.returnattr(sample.sixteens_full, 'sixteens_match')
        # CoreGenesPresent
        data += GenObject.returnattr(sample.gdcs, 'coreresults')
        # rMLST_Result
        try:
            # If the number of matches to the closest reference profile is 53, return the profile number
            if sample.rmlst.matches == 53:
                if type(sample.rmlst.sequencetype) is list:
                    rmlst_seq_type = ';'.join(sorted(sample.rmlst.sequencetype)).rstrip(';') + ','
                else:
                    rmlst_seq_type = GenObject.returnattr(sample.rmlst, 'sequencetype')
                    rmlst_seq_type = rmlst_seq_type if rmlst_seq_type != 'ND,' else 'new,'
                data += rmlst_seq_type
            else:
                # Otherwise the profile is set to new
                data += 'new,'
        except AttributeError:
            data += 'new,'
        # MLST_Result
        try:
            if sample.mlst.matches == 7:
                if type(sample.mlst.sequencetype) is list:
                    mlst_seq_type = ';'.join(sorted(sample.mlst.sequencetype)).rstrip(';') + ','
                else:
                    mlst_seq_type = GenObject.returnattr(sample.mlst, 'sequencetype')
                    mlst_seq_type = mlst_seq_type if mlst_seq_type != 'ND,' else 'new,'
                data += mlst_seq_type
            else:
                data += 'new,'
        except AttributeError:
            data += 'new,'
    # MLST_gene_X_alleles
        try:
            # Create a set of all the genes present in the results (gene name split from allele)
            gene_set = {gene.split('_')[0] for gene in sample.mlst.combined_metadata_results}
            for gene in sorted(gene_set):
                allele_list = list()
                # Determine all the alleles that are present for each gene
                for allele in sample.mlst.combined_metadata_results:
                    if gene in allele:
                        allele_list.append(allele.replace(' ', '_'))
                # If there is more than one allele in the sample, add both to the string separated by a ';'
                if len(allele_list) > 1:
                    data += '{},'.format(';'.join(allele_list))
                # Otherwise add the only allele
                else:
                    data += allele_list[0] + ','
            # If there are fewer than seven matching alleles, add a ND for each missing result
            if len(gene_set) < 7:
                data += (7 - len(gene_set)) * 'ND,'
        except AttributeError:
            # data += '-,-,-,-,-,-,-,'
            data += 'ND,ND,ND,ND,ND,ND,ND,'
        # E_coli_Serotype
        try:
            # If no O-type was found, set the output to be O-untypeable
            if ';'.join(sample.ectyper.o_type) == '-':
                otype = 'O-untypeable'
            else:
                otype = sample.ectyper.o_type
            # Same as above for the H-type
            if ';'.join(sample.ectyper.h_type) == '-':
                htype = 'H-untypeable'
            else:
                htype = sample.ectyper.h_type
            serotype = '{otype}:{htype},'.format(otype=otype,
                                                 htype=htype)
            # Add the serotype to the data string unless neither O-type nor H-type were found; add ND instead
            data += serotype if serotype != 'O-untypeable:H-untypeable,' else 'ND,'
        except AttributeError:
            data += 'ND,'
        # SISTR_serovar_antigen
        data += GenObject.returnattr(sample.sistr, 'serovar_antigen').rstrip(';')
        # SISTR_serovar_cgMLST
        data += GenObject.returnattr(sample.sistr, 'serovar_cgmlst')
        # SISTR_serogroup
        data += GenObject.returnattr(sample.sistr, 'serogroup')
        # SISTR_h1
        data += GenObject.returnattr(sample.sistr, 'h1').rstrip(';')
        # SISTR_h2
        data += GenObject.returnattr(sample.sistr, 'h2').rstrip(';')
        # SISTR_serovar
        data += GenObject.returnattr(sample.sistr, 'serovar')
        # GeneSeekr_Profile
        try:
            if sample.genesippr.report_output:
                data += ';'.join(sample.genesippr.report_output) + ','
            else:
                data += 'ND,'
        except AttributeError:
            data += 'ND,'
        # Vtyper_Profile
        data += GenObject.returnattr(sample.verotoxin, 'verotoxin_subtypes_set')
        # AMR_Profile and resistant/sensitive status
        if sample.resfinder_assembled.pipelineresults:
            # Profile
            for resistance, resistance_set in sorted(sample.resfinder_assembled.pipelineresults.items()):
                data += '{res}({r_set});'.format(res=resistance.replace(',', ';'),
                                                 r_set=';'.join(sorted(list(resistance_set))))
            data += ','
            # Resistant/Sensitive
            data += 'Resistant,'
        else:
            # Profile
            data += 'ND,'
            # Resistant/Sensitive
            data += 'Sensitive,'
        # Plasmid Result
        if sample.mobrecon.pipelineresults:
            for plasmid, details in sorted(sample.mobrecon.pipelineresults.items()):
                data += '{plasmid}({details});'.format(plasmid=plasmid,
                                                       details=details)
            data += ','
        else:
            data += 'ND,'
        # TotalPredictedGenes
        data += GenObject.returnattr(sample.prodigal, 'predictedgenestotal',
                                     number=True)
        # PredictedGenesOver3000bp
        data += GenObject.returnattr(sample.prodigal, 'predictedgenesover3000bp',
                                     number=True)
        # PredictedGenesOver1000bp
        data += GenObject.returnattr(sample.prodigal, 'predictedgenesover1000bp',
                                     number=True)
        # PredictedGenesOver500bp
        data += GenObject.returnattr(sample.prodigal, 'predictedgenesover500bp',
                                     number=True)
        # PredictedGenesUnder500bp
        data += GenObject.returnattr(sample.prodigal, 'predictedgenesunder500bp',
                                     number=True)
        # AssemblyDate
        data += datetime.now().strftime('%Y-%m-%d') + ','
        # PipelineVersion
        data += self.commit + ','
        # Name of the database used in the analyses
        data += os.path.split(self.reffilepath)[-1] + ','
        # Database download date
        data += self.download_date
        # Append a new line to the end of the results for this sample
        data += '\n'
    # Replace any NA values with ND
    # NOTE(review): this replaces the substring 'NA' anywhere in the report,
    # including inside sample or serovar names - confirm this is intended
    cleandata = data.replace('NA', 'ND')
    with open(os.path.join(self.reportpath, 'combinedMetadata.csv'), 'w') as metadatareport:
        metadatareport.write(header)
        metadatareport.write(cleandata)
def legacy_reporter(self):
    """
    Creates an output that is compatible with the strain database
    (legacy_combinedMetadata.csv in self.reportpath). This method will be
    removed once a new database scheme is implemented.

    One comma-separated row is written per sample, in the column order
    declared by self.legacy_headers. Any 'NA' text in the assembled
    report is replaced with 'ND' before writing.
    """
    logging.info('Creating database-friendly summary report')
    header = '{}\n'.format(','.join(self.legacy_headers))
    # Create a string to store all the results
    data = str()
    for sample in self.metadata:
        # Add the value of the appropriate attribute to the results string
        data += GenObject.returnattr(sample, 'name')
        # SampleName
        data += GenObject.returnattr(sample.run, 'SamplePlate')
        # Genus
        data += GenObject.returnattr(sample.general, 'closestrefseqgenus')
        # SequencingDate
        data += GenObject.returnattr(sample.run, 'Date')
        # Analyst
        data += GenObject.returnattr(sample.run, 'InvestigatorName')
        # Legacy ConFindr clean/contaminated call (placeholder column)
        data += 'ND,'
        # N50
        n50 = GenObject.returnattr(sample.quast, 'N50',
                                   number=True)
        # '-,' indicates a missing N50 value; report 0 instead
        if n50 != '-,':
            data += n50
        else:
            data += '0,'
        # NumContigs
        data += GenObject.returnattr(sample.quast, 'num_contigs',
                                     number=True)
        # TotalLength
        data += GenObject.returnattr(sample.quast, 'Total_length',
                                     number=True)
        # MeanInsertSize
        data += GenObject.returnattr(sample.quast, 'mean_insert',
                                     number=True)
        # InsertSizeSTD
        data += GenObject.returnattr(sample.quast, 'std_insert',
                                     number=True)
        # AverageCoverageDepth
        data += GenObject.returnattr(sample.qualimap, 'MeanCoveragedata',
                                     number=True)
        # CoverageDepthSTD
        data += GenObject.returnattr(sample.qualimap, 'StdCoveragedata',
                                     number=True)
        # PercentGC
        data += GenObject.returnattr(sample.quast, 'GC',
                                     number=True)
        # MASH_ReferenceGenome
        data += GenObject.returnattr(sample.mash, 'closestrefseq')
        # MASH_NumMatchingHashes
        data += GenObject.returnattr(sample.mash, 'nummatches')
        # 16S_result
        data += GenObject.returnattr(sample.sixteens_full, 'sixteens_match')
        # rMLST_Result
        try:
            # If the number of matches to the closest reference profile is 53, return the profile number
            if sample.rmlst.matches == 53:
                if type(sample.rmlst.sequencetype) is list:
                    rmlst_seq_type = ';'.join(sorted(sample.rmlst.sequencetype)).rstrip(';') + ','
                else:
                    rmlst_seq_type = GenObject.returnattr(sample.rmlst, 'sequencetype')
                    rmlst_seq_type = rmlst_seq_type if rmlst_seq_type != 'ND,' else 'new,'
                data += rmlst_seq_type
            else:
                # Otherwise the profile is set to new
                data += 'new,'
        except AttributeError:
            data += 'new,'
        # MLST_Result
        try:
            if sample.mlst.matches == 7:
                if type(sample.mlst.sequencetype) is list:
                    mlst_seq_type = ';'.join(sorted(sample.mlst.sequencetype)).rstrip(';') + ','
                else:
                    mlst_seq_type = GenObject.returnattr(sample.mlst, 'sequencetype')
                    mlst_seq_type = mlst_seq_type if mlst_seq_type != 'ND,' else 'new,'
                data += mlst_seq_type
            else:
                data += 'new,'
        except AttributeError:
            data += 'new,'
        # MLST_gene_X_alleles
        try:
            # Create a set of all the genes present in the results (gene name split from allele)
            gene_set = {gene.split('_')[0] for gene in sample.mlst.combined_metadata_results}
            for gene in sorted(gene_set):
                allele_list = list()
                # Determine all the alleles that are present for each gene
                for allele in sample.mlst.combined_metadata_results:
                    if gene in allele:
                        allele_list.append(allele.replace(' ', '_'))
                # If there is more than one allele in the sample, add both to the string separated by a ';'
                if len(allele_list) > 1:
                    data += '{},'.format(';'.join(allele_list))
                # Otherwise add the only allele
                else:
                    data += allele_list[0] + ','
            # If there are fewer than seven matching alleles, add a ND for each missing result
            if len(gene_set) < 7:
                data += (7 - len(gene_set)) * 'ND,'
        except AttributeError:
            # data += '-,-,-,-,-,-,-,'
            data += 'ND,ND,ND,ND,ND,ND,ND,'
        # CoreGenesPresent
        data += GenObject.returnattr(sample.gdcs, 'coreresults')
        # E_coli_Serotype
        try:
            # If no O-type was found, set the output to be O-untypeable
            if ';'.join(sample.ectyper.o_type) == '-':
                otype = 'O-untypeable'
            else:
                otype = sample.ectyper.o_type
            # Same as above for the H-type
            if ';'.join(sample.ectyper.h_type) == '-':
                htype = 'H-untypeable'
            else:
                htype = sample.ectyper.h_type
            serotype = '{otype}:{htype},'.format(otype=otype,
                                                 htype=htype)
            # Add the serotype to the data string unless neither O-type nor H-type were found; add ND instead
            data += serotype if serotype != 'O-untypeable:H-untypeable,' else 'ND,'
        except AttributeError:
            data += 'ND,'
        # SISTR_serovar_antigen
        data += GenObject.returnattr(sample.sistr, 'serovar_antigen').rstrip(';')
        # SISTR_serovar_cgMLST
        data += GenObject.returnattr(sample.sistr, 'serovar_cgmlst')
        # SISTR_serogroup
        data += GenObject.returnattr(sample.sistr, 'serogroup')
        # SISTR_h1
        data += GenObject.returnattr(sample.sistr, 'h1').rstrip(';')
        # SISTR_h2
        data += GenObject.returnattr(sample.sistr, 'h2').rstrip(';')
        # SISTR_serovar
        data += GenObject.returnattr(sample.sistr, 'serovar')
        # GeneSeekr_Profile
        try:
            if sample.genesippr.report_output:
                data += ';'.join(sample.genesippr.report_output) + ','
            else:
                data += 'ND,'
        except AttributeError:
            data += 'ND,'
        # Vtyper_Profile
        data += GenObject.returnattr(sample.verotoxin, 'verotoxin_subtypes_set')
        # AMR_Profile and resistant/sensitive status
        if sample.resfinder_assembled.pipelineresults:
            # Profile
            for resistance, resistance_set in sorted(sample.resfinder_assembled.pipelineresults.items()):
                data += '{res}({r_set});'.format(res=resistance.replace(',', ';'),
                                                 r_set=';'.join(sorted(list(resistance_set))))
            data += ','
            # Resistant/Sensitive
            data += 'Resistant,'
        else:
            # Profile
            data += 'ND,'
            # Resistant/Sensitive
            data += 'Sensitive,'
        # Plasmid Result
        if sample.mobrecon.pipelineresults:
            for plasmid, details in sorted(sample.mobrecon.pipelineresults.items()):
                data += '{plasmid}({details});'.format(plasmid=plasmid,
                                                       details=details)
            data += ','
        else:
            data += 'ND,'
        # TotalPredictedGenes
        data += GenObject.returnattr(sample.prodigal, 'predictedgenestotal',
                                     number=True)
        # PredictedGenesOver3000bp
        data += GenObject.returnattr(sample.prodigal, 'predictedgenesover3000bp',
                                     number=True)
        # PredictedGenesOver1000bp
        data += GenObject.returnattr(sample.prodigal, 'predictedgenesover1000bp',
                                     number=True)
        # PredictedGenesOver500bp
        data += GenObject.returnattr(sample.prodigal, 'predictedgenesover500bp',
                                     number=True)
        # PredictedGenesUnder500bp
        data += GenObject.returnattr(sample.prodigal, 'predictedgenesunder500bp',
                                     number=True)
        # NumClustersPF
        data += GenObject.returnattr(sample.run, 'NumberofClustersPF')
        # Percentage of reads mapping to PhiX control
        data += GenObject.returnattr(sample.run, 'phix_aligned')
        # Error rate calculated from PhiX control
        data += GenObject.returnattr(sample.run, 'error_rate')
        # LengthForwardRead
        data += GenObject.returnattr(sample.run, 'forwardlength',
                                     number=True)
        # LengthReverseRead
        data += GenObject.returnattr(sample.run, 'reverselength',
                                     number=True)
        # Real time strain
        data += GenObject.returnattr(sample.run, 'Description')
        # Flowcell
        data += GenObject.returnattr(sample.run, 'flowcell')
        # MachineName
        data += GenObject.returnattr(sample.run, 'instrument')
        # PipelineVersion
        data += self.commit + ','
        # AssemblyDate
        data += datetime.now().strftime('%Y-%m-%d') + ','
        # SamplePurity
        data += GenObject.returnattr(sample.confindr, 'num_contaminated_snvs')
        # cgMLST
        try:
            if type(sample.cgmlst.sequencetype) is list:
                if sample.cgmlst.sequencetype:
                    cgmlst_seq_type = ';'.join(sorted(sample.cgmlst.sequencetype)).rstrip(';') + ','
                else:
                    cgmlst_seq_type = 'ND,'
            else:
                cgmlst_seq_type = GenObject.returnattr(sample.cgmlst, 'sequencetype')
            # cgmlst_seq_type = cgmlst_seq_type if cgmlst_seq_type != 'ND,' else 'new,'
            data += cgmlst_seq_type
        except AttributeError:
            data += 'ND,'
        # Name of the database used in the analyses
        data += os.path.split(self.reffilepath)[-1] + ','
        # Database download date
        data += self.download_date
        # Append a new line to the end of the results for this sample
        data += '\n'
    # Replace any NA values with ND
    cleandata = data.replace('NA', 'ND')
    with open(os.path.join(self.reportpath, 'legacy_combinedMetadata.csv'), 'w') as metadatareport:
        metadatareport.write(header)
        metadatareport.write(cleandata)
def clean_object(self):
    """
    Remove the bulky core-genome target attributes from every sample's
    metadata object. Samples that lack either attribute (e.g. if the
    core genome analyses were not run) are silently skipped.
    """
    for sample in self.metadata:
        # Collapse the previously duplicated try/except blocks into a
        # single loop over the attributes to delete
        for attribute in ('targetnames', 'targets'):
            try:
                delattr(sample.coregenome, attribute)
            except AttributeError:
                pass
def run_quality_reporter(self):
    """
    Create a run-level quality summary (run_metrics_report.csv in
    self.reportpath). All metrics are run-wide rather than per-sample,
    so only the first sample's run metadata is used (the loop breaks
    after one iteration).

    Fix: the header previously contained a stray space after
    'ErrorRate,', producing a malformed column name ' LengthForwardRead'.
    """
    logging.info('Creating run quality summary report')
    run_name = os.path.split(self.path)[-1]
    data = 'RunName,SequencingDate,Analyst,ClusterDensity,PercentOverQ30,NumberofClustersPF,' \
           'PercentReadsPhiX,ErrorRate,LengthForwardRead,LengthReverseRead,Flowcell,MachineName\n'
    for sample in self.metadata:
        # RunName
        data += '{rn},'.format(rn=run_name)
        # SequencingDate
        data += GenObject.returnattr(sample.run, 'Date')
        # Analyst
        data += GenObject.returnattr(sample.run, 'InvestigatorName')
        # ClusterDensity
        data += GenObject.returnattr(sample.run, 'cluster_density')
        # Percentage of reads with Q-score over 30
        data += GenObject.returnattr(sample.run, 'over_q30')
        # NumClustersPF
        data += GenObject.returnattr(sample.run, 'NumberofClustersPF')
        # Percentage of reads mapping to PhiX control
        data += GenObject.returnattr(sample.run, 'phix_aligned')
        # Error rate calculated from PhiX control
        data += GenObject.returnattr(sample.run, 'error_rate')
        # LengthForwardRead
        data += GenObject.returnattr(sample.run, 'forwardlength',
                                     number=True)
        # LengthReverseRead
        data += GenObject.returnattr(sample.run, 'reverselength',
                                     number=True)
        # Flowcell
        data += GenObject.returnattr(sample.run, 'flowcell')
        # MachineName
        data += GenObject.returnattr(sample.run, 'instrument')
        # Run-level metrics are identical for every sample; one row suffices
        break
    with open(os.path.join(self.reportpath, 'run_metrics_report.csv'), 'w') as run_report:
        run_report.write(data)
def sample_quality_report(self):
    """
    Create the preliminary per-sample quality report
    (preliminary_combinedMetadata.csv in self.reportpath), with columns
    matching self.quality_headers. Any 'NA' text in the assembled report
    is replaced with 'ND' before writing.

    Fix: the AssemblyDate and PipelineVersion values are now appended in
    the order declared by self.quality_headers (AssemblyDate first); the
    original code appended PipelineVersion first, misaligning the final
    four columns of every row.
    """
    logging.info('Creating sample quality summary report')
    header = '{}\n'.format(','.join(self.quality_headers))
    # Create a string to store all the results
    data = str()
    for sample in self.metadata:
        # Add the value of the appropriate attribute to the results string
        data += GenObject.returnattr(sample, 'name')
        # SampleName
        data += GenObject.returnattr(sample.run, 'SamplePlate')
        # Genus
        data += GenObject.returnattr(sample.general, 'closestrefseqgenus')
        # SamplePurity
        data += GenObject.returnattr(sample.confindr, 'num_contaminated_snvs')
        # N50 - '-,' indicates a missing value; report 0 instead
        n50 = GenObject.returnattr(sample.quast, 'N50',
                                   number=True)
        if n50 != '-,':
            data += n50
        else:
            data += '0,'
        # NumContigs
        data += GenObject.returnattr(sample.quast, 'num_contigs',
                                     number=True)
        # TotalLength
        data += GenObject.returnattr(sample.quast, 'Total_length',
                                     number=True)
        # MeanInsertSize
        data += GenObject.returnattr(sample.quast, 'mean_insert',
                                     number=True)
        # InsertSizeSTD
        data += GenObject.returnattr(sample.quast, 'std_insert',
                                     number=True)
        # AverageCoverageDepth
        data += GenObject.returnattr(sample.qualimap, 'MeanCoveragedata',
                                     number=True)
        # CoverageDepthSTD
        data += GenObject.returnattr(sample.qualimap, 'StdCoveragedata',
                                     number=True)
        # PercentGC
        data += GenObject.returnattr(sample.quast, 'GC',
                                     number=True)
        # MASH_ReferenceGenome
        data += GenObject.returnattr(sample.mash, 'closestrefseq')
        # MASH_NumMatchingHashes
        data += GenObject.returnattr(sample.mash, 'nummatches')
        # rMLST_Result
        try:
            # If the number of matches to the closest reference profile is 53, return the profile number
            if sample.rmlst.matches == 53:
                if type(sample.rmlst.sequencetype) is list:
                    rmlst_seq_type = ';'.join(sorted(sample.rmlst.sequencetype)).rstrip(';') + ','
                else:
                    rmlst_seq_type = GenObject.returnattr(sample.rmlst, 'sequencetype')
                    rmlst_seq_type = rmlst_seq_type if rmlst_seq_type != 'ND,' else 'new,'
                data += rmlst_seq_type
            else:
                # Otherwise the profile is set to new
                data += 'new,'
        except AttributeError:
            data += 'new,'
        # TotalPredictedGenes
        data += GenObject.returnattr(sample.prodigal, 'predictedgenestotal',
                                     number=True)
        # PredictedGenesOver3000bp
        data += GenObject.returnattr(sample.prodigal, 'predictedgenesover3000bp',
                                     number=True)
        # PredictedGenesOver1000bp
        data += GenObject.returnattr(sample.prodigal, 'predictedgenesover1000bp',
                                     number=True)
        # PredictedGenesOver500bp
        data += GenObject.returnattr(sample.prodigal, 'predictedgenesover500bp',
                                     number=True)
        # PredictedGenesUnder500bp
        data += GenObject.returnattr(sample.prodigal, 'predictedgenesunder500bp',
                                     number=True)
        # AssemblyDate (before PipelineVersion, matching quality_headers)
        data += datetime.now().strftime('%Y-%m-%d') + ','
        # PipelineVersion
        data += self.commit + ','
        # Name of the database used in the analyses
        data += os.path.split(self.reffilepath)[-1] + ','
        # Database download date
        data += self.download_date
        # Append a new line to the end of the results for this sample
        data += '\n'
    # Replace any NA values with ND
    cleandata = data.replace('NA', 'ND')
    with open(os.path.join(self.reportpath, 'preliminary_combinedMetadata.csv'), 'w') as metadatareport:
        metadatareport.write(header)
        metadatareport.write(cleandata)
def __init__(self, inputobject):
    """
    Store the run metadata and report settings from the supplied pipeline
    object, and define the column headers used by each report-writing
    method.

    :param inputobject: pipeline object providing runmetadata.samples,
        commit, reportpath, starttime, path, and reffilepath attributes
    """
    # Per-sample metadata objects interrogated by all report methods
    self.metadata = inputobject.runmetadata.samples
    # Pipeline version string written to the reports
    self.commit = inputobject.commit
    self.reportpath = inputobject.reportpath
    self.starttime = inputobject.starttime
    self.path = inputobject.path
    # Root of the reference database; its basename and download_date file
    # are recorded in every report
    self.reffilepath = inputobject.reffilepath
    # Define the headers to be used in the metadata report
    # Columns for sample_quality_report (preliminary_combinedMetadata.csv)
    self.quality_headers = [
        'SeqID',
        'SampleName',
        'Genus',
        'ConfindrContamSNVs',
        'N50',
        'NumContigs',
        'TotalLength',
        'MeanInsertSize',
        'InsertSizeSTD',
        'AverageCoverageDepth',
        'CoverageDepthSTD',
        'PercentGC',
        'MASH_ReferenceGenome',
        'MASH_NumMatchingHashes',
        'rMLST_Result',
        'TotalPredictedGenes',
        'PredictedGenesOver3000bp',
        'PredictedGenesOver1000bp',
        'PredictedGenesOver500bp',
        'PredictedGenesUnder500bp',
        'AssemblyDate',
        'PipelineVersion',
        'Database',
        'DatabaseDownloadDate'
    ]
    # Columns for metadata_reporter (combinedMetadata.csv)
    self.headers = [
        'SeqID',
        'SampleName',
        'Genus',
        'ConfindrContamSNVs',
        'N50',
        'NumContigs',
        'TotalLength',
        'MeanInsertSize',
        'InsertSizeSTD',
        'AverageCoverageDepth',
        'CoverageDepthSTD',
        'PercentGC',
        'MASH_ReferenceGenome',
        'MASH_NumMatchingHashes',
        '16S_result',
        'CoreGenesPresent',
        'rMLST_Result',
        'MLST_Result',
        'MLST_gene_1_allele',
        'MLST_gene_2_allele',
        'MLST_gene_3_allele',
        'MLST_gene_4_allele',
        'MLST_gene_5_allele',
        'MLST_gene_6_allele',
        'MLST_gene_7_allele',
        'E_coli_Serotype',
        'SISTR_serovar_antigen',
        'SISTR_serovar_cgMLST',
        'SISTR_serogroup',
        'SISTR_h1',
        'SISTR_h2',
        'SISTR_serovar',
        'GeneSeekr_Profile',
        'Vtyper_Profile',
        'AMR_Profile',
        'AMR Resistant/Sensitive',
        'PlasmidProfile',
        'TotalPredictedGenes',
        'PredictedGenesOver3000bp',
        'PredictedGenesOver1000bp',
        'PredictedGenesOver500bp',
        'PredictedGenesUnder500bp',
        'AssemblyDate',
        'PipelineVersion',
        'Database',
        'DatabaseDownloadDate'
    ]
    # Columns for legacy_reporter (legacy_combinedMetadata.csv)
    self.legacy_headers = [
        'SeqID',
        'SampleName',
        'Genus',
        'SequencingDate',
        'Analyst',
        'SamplePurity',
        'N50',
        'NumContigs',
        'TotalLength',
        'MeanInsertSize',
        'InsertSizeSTD',
        'AverageCoverageDepth',
        'CoverageDepthSTD',
        'PercentGC',
        'MASH_ReferenceGenome',
        'MASH_NumMatchingHashes',
        '16S_result',
        'rMLST_Result',
        'MLST_Result',
        'MLST_gene_1_allele',
        'MLST_gene_2_allele',
        'MLST_gene_3_allele',
        'MLST_gene_4_allele',
        'MLST_gene_5_allele',
        'MLST_gene_6_allele',
        'MLST_gene_7_allele',
        'CoreGenesPresent',
        'E_coli_Serotype',
        'SISTR_serovar_antigen',
        'SISTR_serovar_cgMLST',
        'SISTR_serogroup',
        'SISTR_h1',
        'SISTR_h2',
        'SISTR_serovar',
        'GeneSeekr_Profile',
        'Vtyper_Profile',
        'AMR_Profile',
        'AMR Resistant/Sensitive',
        'PlasmidProfile',
        'TotalPredictedGenes',
        'PredictedGenesOver3000bp',
        'PredictedGenesOver1000bp',
        'PredictedGenesOver500bp',
        'PredictedGenesUnder500bp',
        'NumClustersPF',
        'PercentReadsPhiX',
        'ErrorRate',
        'LengthForwardRead',
        'LengthReverseRead',
        'RealTimeStrain',
        'Flowcell',
        'MachineName',
        'PipelineVersion',
        'AssemblyDate',
        'ConfindrContamSNVs',
        'cgMLST_Result',
        'Database',
        'DatabaseDownloadDate'
    ]
    # Record when the reference database was downloaded; fall back to 'ND'
    # if the download_date file is absent
    try:
        with open(os.path.join(self.reffilepath, 'download_date'), 'r') as download_date:
            self.download_date = download_date.readline().rstrip()
    except FileNotFoundError:
        self.download_date = 'ND'
| 45.723288
| 113
| 0.510995
| 2,770
| 33,378
| 6.039711
| 0.114079
| 0.11584
| 0.152421
| 0.161207
| 0.88147
| 0.868978
| 0.862044
| 0.846862
| 0.844591
| 0.840526
| 0
| 0.011577
| 0.394451
| 33,378
| 729
| 114
| 45.786008
| 0.816149
| 0.142579
| 0
| 0.874307
| 0
| 0
| 0.149111
| 0.041099
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011091
| false
| 0.003697
| 0.007394
| 0
| 0.020333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
63fab79ddb25b88dcb4d967c7d1af9880293d6f3
| 2,447
|
py
|
Python
|
conformance/tests/ot_count.py
|
bmeg/grip
|
60d524f3ced989ea80edbef5565f8de78b03081a
|
[
"MIT"
] | 17
|
2018-10-25T15:33:51.000Z
|
2022-03-16T19:07:22.000Z
|
conformance/tests/ot_count.py
|
bmeg/grip
|
60d524f3ced989ea80edbef5565f8de78b03081a
|
[
"MIT"
] | 80
|
2018-08-28T17:15:31.000Z
|
2022-01-25T02:01:16.000Z
|
conformance/tests/ot_count.py
|
bmeg/arachne
|
561a6b179eeb18003f774586fffe1b551896c6d2
|
[
"MIT"
] | 5
|
2017-09-12T05:40:41.000Z
|
2018-06-29T10:00:39.000Z
|
def test_count(man):
    """
    Conformance test for vertex and edge count queries on the 'swapi'
    graph. Returns a list of failure messages (empty on success).

    Fix: two failure messages referred to 'O.query()' while the code
    queries 'G' (copy-paste slip); a stray debug print was removed.
    """
    errors = []
    G = man.setGraph("swapi")
    # Vertex count over the whole graph
    i = list(G.query().V().count())
    if len(i) < 1:
        errors.append("Fail: nothing returned for G.query().V().count()")
    elif i[0]["count"] != 39:
        errors.append("Fail: G.query().V().count() %s != %s" % (i[0]["count"], 39))
    # Counting a non-existent vertex should return a count of 0
    i = list(G.query().V("non-existent").count())
    if len(i) < 1:
        errors.append("Fail: nothing returned for G.query().V(\"non-existent\").count()")
    elif i[0]["count"] != 0:
        errors.append("Fail: G.query().V(\"non-existent\").count() %s != %s" % (i[0]["count"], 0))
    # Edge count over the whole graph
    i = list(G.query().E().count())
    if len(i) < 1:
        errors.append("Fail: nothing returned for G.query().E().count()")
    elif i[0]["count"] != 144:
        errors.append("Fail: G.query().E().count() %s != %s" % (i[0]["count"], 144))
    # Counting a non-existent edge should return a count of 0
    i = list(G.query().E("non-existent").count())
    if len(i) < 1:
        errors.append("Fail: nothing returned for G.query().E(\"non-existent\").count()")
    elif i[0]["count"] != 0:
        errors.append("Fail: G.query().E(\"non-existent\").count() %s != %s" % (i[0]["count"], 0))
    return errors
# Tests an edge case where mongo aggregations will fail to return a count when
# the collection doesn't exist
def test_count_when_no_data(man):
    """
    Conformance test: all count queries on an empty/nonexistent graph must
    still return a single result with a count of 0. Returns a list of
    failure messages (empty on success). A stray debug print was removed.
    """
    errors = []
    G = man.writeTest()
    # Vertex count on an empty graph
    i = list(G.query().V().count())
    if len(i) < 1:
        errors.append("Fail: nothing returned for G.query().V().count()")
    elif i[0]["count"] != 0:
        errors.append("Fail: G.query().V().count() %s != %s" % (i[0]["count"], 0))
    i = list(G.query().V("non-existent").count())
    if len(i) < 1:
        errors.append("Fail: nothing returned for G.query().V(\"non-existent\").count()")
    elif i[0]["count"] != 0:
        errors.append("Fail: G.query().V(\"non-existent\").count() %s != %s" % (i[0]["count"], 0))
    # Edge count on an empty graph
    i = list(G.query().E().count())
    if len(i) < 1:
        errors.append("Fail: nothing returned for G.query().E().count()")
    elif i[0]["count"] != 0:
        errors.append("Fail: G.query().E().count() %s != %s" % (i[0]["count"], 0))
    i = list(G.query().E("non-existent").count())
    if len(i) < 1:
        errors.append("Fail: nothing returned for G.query().E(\"non-existent\").count()")
    elif i[0]["count"] != 0:
        errors.append("Fail: G.query().E(\"non-existent\").count() %s != %s" % (i[0]["count"], 0))
    return errors
| 35.463768
| 98
| 0.534941
| 374
| 2,447
| 3.486631
| 0.131016
| 0.096626
| 0.196319
| 0.07362
| 0.861963
| 0.861963
| 0.843558
| 0.818252
| 0.799847
| 0.799847
| 0
| 0.023699
| 0.206784
| 2,447
| 68
| 99
| 35.985294
| 0.64812
| 0.04291
| 0
| 0.68
| 0
| 0
| 0.347882
| 0.071887
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0
| 0
| 0.08
| 0.04
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1251fef0c673d36838a965089f051af4343b2e2c
| 101
|
py
|
Python
|
plugin/web/__init__.py
|
lisugar/ray_build_tools
|
a304c8fc30ce9f61cbdc566d8dc193945f14883d
|
[
"MIT"
] | null | null | null |
plugin/web/__init__.py
|
lisugar/ray_build_tools
|
a304c8fc30ce9f61cbdc566d8dc193945f14883d
|
[
"MIT"
] | null | null | null |
plugin/web/__init__.py
|
lisugar/ray_build_tools
|
a304c8fc30ce9f61cbdc566d8dc193945f14883d
|
[
"MIT"
] | null | null | null |
from build_tools.plugin.web import executor
def get_plugin_class():
    """Return the plugin class exposed by this package: the Bottle-based web executor."""
    plugin_cls = executor.BottlePlugin
    return plugin_cls
| 25.25
| 43
| 0.821782
| 14
| 101
| 5.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118812
| 101
| 4
| 44
| 25.25
| 0.898876
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
89bff51348703ab9357ca6418f6d58fc9208ac58
| 195,841
|
py
|
Python
|
tests/api/analytics/endpoints/analytics_tests.py
|
huang195/iter8-analytics
|
6d03128ac3fc3f4e9754d41e44baa72f411f29e5
|
[
"Apache-2.0"
] | null | null | null |
tests/api/analytics/endpoints/analytics_tests.py
|
huang195/iter8-analytics
|
6d03128ac3fc3f4e9754d41e44baa72f411f29e5
|
[
"Apache-2.0"
] | null | null | null |
tests/api/analytics/endpoints/analytics_tests.py
|
huang195/iter8-analytics
|
6d03128ac3fc3f4e9754d41e44baa72f411f29e5
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for the analytics REST API."""
import unittest
from unittest.mock import Mock
from unittest.mock import patch
from requests.models import Response
import json
from iter8_analytics import app as flask_app
from iter8_analytics.api.analytics.endpoints.examples import eip_example
from iter8_analytics.api.analytics import responses
from iter8_analytics.api.analytics import request_parameters
import iter8_analytics.constants as constants
from iter8_analytics.api.analytics.successcriteria import StatisticalTests, SuccessCriterion
import dateutil.parser as parser
from collections import namedtuple
import logging
import os
import requests_mock
import requests
import re
from urllib.parse import urlencode
log = logging.getLogger(__name__)
class TestAnalyticsNamespaceAPI(unittest.TestCase):
@classmethod
def setUpClass(cls):
    """
    One-time setup common to all tests in this class: configure the Flask
    app for testing, create a test client, and resolve the metrics
    backend query endpoint from the environment.
    """
    # Initialize the Flask app for testing
    flask_app.app.testing = True
    flask_app.config_logger()
    flask_app.initialize(flask_app.app)
    # Get an internal Flask test client
    cls.flask_test = flask_app.app.test_client()
    # NOTE(review): assumes the METRICS_BACKEND_URL_ENV variable is set;
    # if absent, backend_url is None and the endpoint string is malformed
    cls.backend_url = os.getenv(constants.METRICS_BACKEND_URL_ENV)
    # Prometheus-style instant-query endpoint on the metrics backend
    cls.metrics_endpoint = f'{cls.backend_url}/api/v1/query'
    #cls.metrics_endpoint = f'http://localhost:9090/api/v1/query'
    log.info('Completed initialization for all analytics REST API tests.')
##All tests after this involve the /analytics/canary/check_and_increment endpoint (until mentioned otherwise)
def test_payload_canary_check_and_increment(self):
    """Tests the REST endpoint /analytics/canary/check_and_increment.

    Exercises the endpoint with a sequence of payloads: a valid request,
    a request where no metric change is observed, several payloads with
    required fields missing (schema validation must answer 400), and
    payloads that trigger criterion-specific validation errors.
    """
    endpoint = 'http://localhost:5555/api/v1/analytics/canary/check_and_increment'
    # Load the canned Prometheus response and close the file promptly;
    # the original json.load(open(...)) leaked the file handle.
    with open("tests/data/prometheus_sample_response.json") as response_file:
        prometheus_response = json.load(response_file)
    with requests_mock.mock() as m:
        m.get(self.metrics_endpoint, json=prometheus_response)
        ###################
        # Test request with some required parameters
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        log.info("Test request with some required parameters")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0.0",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "delta",
                        "value": 0.02,
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {}
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
        ###################
        # Test request with no change observed wrt sample size or metric parameters
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        # Log message fixed: the original repeated the previous section's text.
        log.info("Test request with no change observed wrt sample size or metric parameters")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0.0",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "delta",
                        "value": 0.02,
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {
                "baseline": {
                    "traffic_percentage": 75,
                    "success_criterion_information": [
                        [0, 0]]
                },
                "candidate": {
                    "traffic_percentage": 25,
                    "success_criterion_information": [
                        [0, 0]]
                }
            }
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        correct_response = {"baseline": {"traffic_percentage": 73.0, "success_criterion_information": [[21, 21.764]]}, "candidate": {"traffic_percentage": 27, "success_criterion_information": [[19, 19.677]]}}
        self.assertEqual(resp.status_code, 200, resp.data)
        self.assertEqual(resp.get_json()["_last_state"], correct_response)
        ##################
        # Test request with start_time missing in payload
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        log.info("Test request with start_time missing in payload")
        parameters = {
            "baseline": {
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_count",
                        "is_counter": True,
                        "absent_value": "0.0",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "delta",
                        "value": 0.02,
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {}
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        # We should get a BAD REQUEST HTTP error
        self.assertEqual(resp.status_code, 400, 'Missing start_time parameter')
        assert b'\'start_time\' is a required property' in resp.data
        ##################
        # Test request with success_criteria missing in payload
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        log.info("Test request with success_criteria missing in payload")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
            },
            "_last_state": {}
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        # We should get a BAD REQUEST HTTP error
        # (assert message fixed: the original read "Missing ... missing")
        self.assertEqual(resp.status_code, 400, 'success_criteria missing in payload')
        assert b'\'success_criteria\' is a required property' in resp.data
        ###################
        # Test request with baseline missing in payload
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        log.info("Test request with baseline missing in payload")
        parameters = {
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_count",
                        "is_counter": True,
                        "absent_value": "0.0",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "delta",
                        "value": 0.02,
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {}
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        # We should get a BAD REQUEST HTTP error
        self.assertEqual(resp.status_code, 400, 'Baseline missing in payload')
        assert b'\'baseline\' is a required property' in resp.data
        ###################
        # Test request with missing value in success_criteria
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        log.info("Test request with missing value in success_criteria")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_count",
                        "is_counter": True,
                        "absent_value": "0.0",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "delta",
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {}
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 400, 'Missing value in success_criteria')
        assert b'\'value\' is a required property' in resp.data
        ###################
        # Test request with Unknown type in is_counter
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        log.info("Test request with Unknown type in is_counter")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_count",
                        "is_counter": "No",
                        "absent_value": "0.0",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "normal",
                        "value": 0.02,
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {}
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 400, 'Unknown type in is_counter')
        assert b"\'No\' is not of type \'boolean\'" in resp.data
        assert b'\'normal\' is not one of [\'delta\', \'threshold\']' in resp.data
        ##################
        # Test request with metric type missing in payload
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        log.info("Test request with is_counter missing in payload")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_count",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 0.02,
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {}
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        # We should get a BAD REQUEST HTTP error
        self.assertEqual(resp.status_code, 400, 'is_counter missing in payload')
        assert b'\'is_counter\' is a required property' in resp.data
        # NOTE(review): absent_value is apparently not required by the schema;
        # the original kept this assertion disabled.
        #assert b'\'absent_value\' is a required property' in resp.data
        ##################
        # Test request with absent value of type float in payload
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        log.info("Test request with absent value of type float in payload")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_count",
                        "is_counter": True,
                        "absent_value": 0,
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 0.02,
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {}
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        # We should get a BAD REQUEST HTTP error
        self.assertEqual(resp.status_code, 400, 'non-string absent_value in payload')
        assert b'0 is not of type \'string\'' in resp.data
        ##################
        # Test request with metric_query_template missing in payload
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        log.info("Test request with metric_query_template missing in payload")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_count",
                        "is_counter": True,
                        "absent_value": "0.0",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 0.02,
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {}
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        # We should get a BAD REQUEST HTTP error
        self.assertEqual(resp.status_code, 400, 'metric_query_template missing in payload')
        assert b'\'metric_query_template\' is a required property' in resp.data
        ##################
        # Test request with metric_sample_size_query_template missing in payload
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        log.info("Test request with metric_sample_size_query_template missing in payload")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_count",
                        "is_counter": True,
                        "absent_value": "0.0",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 0.02,
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {}
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        # We should get a BAD REQUEST HTTP error
        # (assert message fixed: original named the wrong field)
        self.assertEqual(resp.status_code, 400, 'metric_sample_size_query_template missing in payload')
        assert b'\'metric_sample_size_query_template\' is a required property' in resp.data
        ##################
        # Test request threshold crossing in a counter metric
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        # Log message fixed: original was copy-pasted from the previous section.
        log.info("Test request threshold crossing in a counter metric")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_count",
                        "is_counter": True,
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 18,
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {}
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        # Crossing the threshold must abort the experiment.
        assert resp.get_json()["assessment"]["summary"]["abort_experiment"]
        assert not resp.get_json()["assessment"]["summary"]["all_success_criteria_met"]
        ##################
        # Test request delta criterion with counter metric
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        # Log message fixed: original was copy-pasted from an earlier section.
        log.info("Test request delta criterion with counter metric")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_count",
                        "is_counter": True,
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "delta",
                        "value": 0.5,
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {}
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        assert b'Delta criterion cannot be used with counter metric.' in resp.data
def test_baseline_failing_success_criteria(self):
    """Tests /analytics/canary/check_and_increment when the baseline itself
    fails the success criteria (canned Prometheus response where both
    versions perform badly)."""
    endpoint = 'http://localhost:5555/api/v1/analytics/canary/check_and_increment'
    # Load the canned Prometheus response and close the file promptly;
    # the original json.load(open(...)) leaked the file handle.
    with open("tests/data/prometheus_baseline_failing_response.json") as response_file:
        prometheus_response = json.load(response_file)
    with requests_mock.mock() as m:
        m.get(self.metrics_endpoint, json=prometheus_response)
        ###################
        # Test request when both candidate and baseline fail success criteria
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        log.info("Test request when both candidate and baseline fail success criteria")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0.0",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 2,
                        "sample_size": 2,
                        "stop_on_failure": False
                    },
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "delta",
                        "value": 0.1,
                        "sample_size": 2,
                        "stop_on_failure": True
                    }
                ]
            },
            "_last_state": {
                "baseline": {
                    "traffic_percentage": 98.0,
                    "success_criterion_information": [[1, 0.0]]
                },
                "candidate": {
                    "traffic_percentage": 2,
                    "success_criterion_information": [[1, 0.0]]
                }
            }
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
        # The assessment must call out the failing baseline explicitly.
        assert 'The baseline version did not meet success criteria' in resp.get_json()["assessment"]["summary"]["conclusions"]
def test_no_data_from_prometheus(self):
    """Tests /analytics/canary/check_and_increment when Prometheus returns
    no data; the absent_value fallback must keep the request succeeding."""
    endpoint = 'http://localhost:5555/api/v1/analytics/canary/check_and_increment'
    # Load the canned "no data" Prometheus response and close the file
    # promptly; the original json.load(open(...)) leaked the file handle.
    with open("tests/data/prometheus_no_data_response.json") as response_file:
        prometheus_response = json.load(response_file)
    with requests_mock.mock() as m:
        m.get(self.metrics_endpoint, json=prometheus_response)
        ###################
        # Test request with no data from prometheus- iter8_error_count
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        log.info("Test request with no data from prometheus")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0.0",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "delta",
                        "value": 0.02,
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {}
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
        ###################
        # Test request with no data from prometheus- iter8_latency
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        log.info("Test request with no data from prometheus")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_latency",
                        "is_counter": False,
                        "absent_value": "None",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "delta",
                        "value": 0.02,
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {}
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
#All tests after this involve the /analytics/canary/epsilon_t_greedy endpoint
def test_payload_canary_epsilon_t_greedy(self):
    """Tests the REST endpoint /analytics/canary/epsilon_t_greedy.

    Covers a first-iteration request, a mid-experiment request with a
    pre-filled _last_state (iteration 4), and an iteration-5 request
    where no metric change is observed (state must be unchanged).
    """
    endpoint = 'http://localhost:5555/api/v1/analytics/canary/epsilon_t_greedy'
    # Load the canned Prometheus response and close the file promptly;
    # the original json.load(open(...)) leaked the file handle.
    with open("tests/data/prometheus_sample_response.json") as response_file:
        prometheus_response = json.load(response_file)
    with requests_mock.mock() as m:
        m.get(self.metrics_endpoint, json=prometheus_response)
        ###################
        # Test request with some required parameters
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        log.info("Test request with some required parameters")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0.0",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "delta",
                        "value": 0.02,
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {}
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
        ##################
        # Test request with pre filled last state in payload on iteration 4
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        # Log message fixed: original repeated the previous section's text.
        log.info("Test request with pre filled last state in payload on iteration 4")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "end_time": "2019-04-24T20:30:02.389Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "end_time": "2019-04-24T20:30:02.389Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "max_traffic_percent": 100,
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0.0",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "delta",
                        "value": 0.02,
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {
                "baseline": {
                    "traffic_percentage": 13,
                    "success_criterion_information": [
                        [
                            1,
                            2
                        ]
                    ]
                },
                "candidate": {
                    "traffic_percentage": 87,
                    "success_criterion_information": [
                        [
                            2,
                            3
                        ]
                    ]
                },
                "effective_iteration_count": 4
            }
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
        correct_response = {"baseline": {"traffic_percentage": 10.0, "success_criterion_information": [[21, 21.764]]}, "candidate": {"traffic_percentage": 90, "success_criterion_information": [[19, 19.677]]}, "effective_iteration_count": 5}
        self.assertEqual(resp.get_json()["_last_state"], correct_response)
        ##################
        # Test request with pre filled last state in payload on iteration 5- when no change is observed
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        # Log message fixed: original repeated an earlier section's text.
        log.info("Test request with pre filled last state in payload on iteration 5 - no change observed")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0.0",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "delta",
                        "value": 0.02,
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {
                "baseline": {
                    "traffic_percentage": 10,
                    "success_criterion_information": [
                        [21, 21.764]
                    ]
                },
                "candidate": {
                    "traffic_percentage": 90,
                    "success_criterion_information": [
                        [19, 19.677]
                    ]
                },
                "effective_iteration_count": 5
            }
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
        # No change observed, so the returned state must equal the input state.
        correct_response = {"baseline": {"traffic_percentage": 10.0, "success_criterion_information": [[21, 21.764]]}, "candidate": {"traffic_percentage": 90, "success_criterion_information": [[19, 19.677]]}, "effective_iteration_count": 5}
        self.assertEqual(resp.get_json()["_last_state"], correct_response)
# Test request when candidate fails success criteria
def test_candidate_failing_success_criteria(self):
    """Tests /analytics/canary/epsilon_t_greedy when the candidate fails
    its success criteria, both outright and due to an unmet sample-size
    requirement."""
    endpoint = 'http://localhost:5555/api/v1/analytics/canary/epsilon_t_greedy'
    # Load the canned Prometheus response and close the file promptly;
    # the original json.load(open(...)) leaked the file handle.
    with open("tests/data/prometheus_baseline_failing_response.json") as response_file:
        prometheus_response = json.load(response_file)
    with requests_mock.mock() as m:
        m.get(self.metrics_endpoint, json=prometheus_response)
        ###################
        # Test request when both candidate and baseline fail success criteria
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        log.info("Test request when both candidate and baseline fail success criteria")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_count",
                        "is_counter": True,
                        "absent_value": "0.0",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 2,
                        "sample_size": 1,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {
                "baseline": {
                    "traffic_percentage": 90,
                    "success_criterion_information": [
                        [1, 0.0]
                    ]
                },
                "candidate": {
                    "traffic_percentage": 10,
                    "success_criterion_information": [
                        [1, 0.0]
                    ]
                },
                "effective_iteration_count": 3
            }
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
        assert 'The baseline version did not meet success criteria' in resp.get_json()["assessment"]["summary"]["conclusions"]
        ###################
        # Test request when candidate fails success criteria because sample size requirements are not met
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT %s', endpoint)
        # Log message fixed: original repeated the previous section's text.
        log.info("Test request when candidate fails success criteria because sample size requirements are not met")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_count",
                        "is_counter": True,
                        "absent_value": "0.0",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 2,
                        "sample_size": 10,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {
                "baseline": {
                    "traffic_percentage": 90,
                    "success_criterion_information": [
                        [1, 0.0]
                    ]
                },
                "candidate": {
                    "traffic_percentage": 10,
                    "success_criterion_information": [
                        [1, 0.0]
                    ]
                },
                "effective_iteration_count": 3
            }
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
        # Traffic split and iteration count must not advance when the sample
        # size requirement is unmet; only the criterion information updates.
        correct_response = {'baseline': {'traffic_percentage': 90.0, 'success_criterion_information': [[4, 4.0]]}, 'candidate': {'traffic_percentage': 10, 'success_criterion_information': [[5, 5.0]]}, 'effective_iteration_count': 3}
        self.assertEqual(correct_response, resp.get_json()["_last_state"])
#All tests after this involve the /analytics/canary/posterior_bayesian_routing endpoint
def test_payload_canary_posterior_bayesian_routing(self):
"""Tests the REST endpoint /analytics/canary/posterior_bayesian_routing."""
endpoint = f'http://localhost:5555/api/v1/analytics/canary/posterior_bayesian_routing'
with requests_mock.mock() as m:
m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_sample_response.json")))
###################
# Test request with some required parameters
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with some required parameters")
parameters = {
"baseline": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"confidence": 0.9,
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0",
"min_max": {
"min": 0,
"max": 1
},
"metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 0.02,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
#Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
###################
# Test request to check for default parameters
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with some required parameters")
parameters = {
"baseline": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 0.02
}
]
},
"_last_state": {}
}
#Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
###################
# Test request to with stop on failure=True
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with some required parameters")
parameters = {
"baseline": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 19,
"stop_on_failure": True
}
]
},
"_last_state": {}
}
#Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
correct_response = ["The experiment needs to be aborted", "All success criteria were not met", "Required confidence of 0.95 was not reached"]
self.assertEqual(correct_response, resp.get_json()["assessment"]["summary"]["conclusions"])
self.assertEqual(resp.status_code, 200, resp.data)
def test_payload_canary_bayesian_routing_high_sample_size(self):
"""Tests the REST endpoint /analytics/canary/posterior_bayesian_routing."""
endpoint = f'http://localhost:5555/api/v1/analytics/canary/posterior_bayesian_routing'
with requests_mock.mock() as m:
m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_sample_response_br.json")))
###################
# Test request with high sample size for high confidence results
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with some required parameters")
parameters = {
"baseline": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"confidence": 0.5,
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0",
"min_max": {
"min": 0,
"max": 1
},
"metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 200000,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
#Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
correct_response = ["All success criteria were met", "Required confidence of 0.5 was reached"]
self.assertEqual(correct_response, resp.get_json()["assessment"]["summary"]["conclusions"])
###################
# Test request with high sample size for high confidence results + multiple metrics
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with some required parameters")
parameters = {
"baseline": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"confidence": 0.5,
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0",
"min_max": {
"min": 0,
"max": 1
},
"metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 200000,
"stop_on_failure": False
},
{
"metric_name": "iter8_error_count",
"is_counter": True,
"absent_value": "0",
"min_max": {
"min": 0,
"max": 1
},
"metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 200000,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
#Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
correct_response = ["All success criteria were met", "Required confidence of 0.5 was reached"]
self.assertEqual(correct_response, resp.get_json()["assessment"]["summary"]["conclusions"])
#All tests after this involve the /analytics/canary/optimistic_bayesian_routing endpoint
def test_payload_canary_optimistic_bayesian_routing(self):
"""Tests the REST endpoint /analytics/canary/optimistic_bayesian_routing."""
endpoint = f'http://localhost:5555/api/v1/analytics/canary/optimistic_bayesian_routing'
with requests_mock.mock() as m:
m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_sample_response.json")))
###################
# Test request with some required parameters
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with some required parameters")
parameters = {
"baseline": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"confidence": 0.9,
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0",
"min_max": {
"min": 0,
"max": 1
},
"metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 0.02,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
#Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
###################
# Test request to check for default parameters
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with some required parameters")
parameters = {
"baseline": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 0.02
}
]
},
"_last_state": {}
}
#Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
###################
# Test request to with stop on failure=True
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with some required parameters")
parameters = {
"baseline": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 19,
"stop_on_failure": True
}
]
},
"_last_state": {}
}
#Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
correct_response = ["The experiment needs to be aborted", "All success criteria were not met", "Required confidence of 0.95 was not reached"]
self.assertEqual(correct_response, resp.get_json()["assessment"]["summary"]["conclusions"])
self.assertEqual(resp.status_code, 200, resp.data)
def test_payload_canary_optimistic_bayesian_routing_high_sample_size(self):
"""Tests the REST endpoint /analytics/canary/optimistic_bayesian_routing."""
endpoint = f'http://localhost:5555/api/v1/analytics/canary/optimistic_bayesian_routing'
with requests_mock.mock() as m:
m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_sample_response_br.json")))
###################
# Test request with high sample size for high confidence results
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with some required parameters")
parameters = {
"baseline": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"confidence": 0.5,
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0",
"min_max": {
"min": 0,
"max": 1
},
"metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 200000,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
#Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
log.info(f"{resp.data}")
self.assertEqual(resp.status_code, 200, resp.data)
correct_response = ["All success criteria were met", "Required confidence of 0.5 was reached"]
self.assertEqual(correct_response, resp.get_json()["assessment"]["summary"]["conclusions"])
###################
# Test request with high sample size for high confidence results + multiple metrics
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with some required parameters")
parameters = {
"baseline": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"confidence": 0.5,
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0",
"min_max": {
"min": 0,
"max": 1
},
"metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 200000,
"stop_on_failure": False
},
{
"metric_name": "iter8_error_count",
"is_counter": True,
"absent_value": "0",
"min_max": {
"min": 0,
"max": 1
},
"metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 200000,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
#Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
correct_response = ["All success criteria were met", "Required confidence of 0.5 was reached"]
self.assertEqual(correct_response, resp.get_json()["assessment"]["summary"]["conclusions"])
def test_no_data_canary_optimistic_bayesian_routing_high_sample_size(self):
"""Tests the REST endpoint /analytics/canary/optimistic_bayesian_routing."""
endpoint = f'http://localhost:5555/api/v1/analytics/canary/optimistic_bayesian_routing'
with requests_mock.mock() as m:
m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_no_data_response.json")))
###################
# Test request with no data for obr (first iteration)
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with some required parameters")
parameters = {
"baseline": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"confidence": 0.5,
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0",
"min_max": {
"min": 0,
"max": 1
},
"metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 200000,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
#Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
###################
# Test request with no data for obr (not first iteration + min-max available)
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with no data for obr (not first iteration + min-max available)")
params = namedtuple('params', 'alpha beta gamma sigma')
parameters = {
"baseline": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"confidence": 0.5,
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "None",
"min_max": {
"min": 0,
"max": 1
},
"metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 200000,
"stop_on_failure": False
}
]
},
"_last_state": {
"baseline": {
"success_criterion_belief": [params(1, 1, None, None)],
"reward_belief": params(None, None, None, None)
},
"candidate": {
"success_criterion_belief": [params(1, 1, None, None)],
"reward_belief": params(None, None, None, None)
}
}
}
#Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
###################
# Test request with no data for obr (not first iteration + min-max not available)
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with no data for obr (not first iteration + min-max not available)")
params = namedtuple('params', 'alpha beta gamma sigma')
parameters = {
"baseline": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"confidence": 0.5,
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "None",
"metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 200000,
"stop_on_failure": False
}
]
},
"_last_state": {
"baseline": {
"success_criterion_belief": [params(None, None, 0, 1)],
"reward_belief": params(None, None, None, None)
},
"candidate": {
"success_criterion_belief": [params(None, None, 0, 1)],
"reward_belief": params(None, None, None, None)
}
}
}
#Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
##All tests after this involve the /analytics/canary/check_and_increment endpoint for A/B experiments
def test_payload_ab_check_and_increment(self):
"""Tests the REST endpoint /analytics/ab/check_and_increment."""
endpoint = f'http://localhost:5555/api/v1/analytics/canary/check_and_increment'
with requests_mock.mock() as m:
m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_sample_response.json")))
###################
# Test request with some required parameters
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with some required parameters")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "delta",
"value": 0.02,
"sample_size": 0,
"stop_on_failure": False
}
],
"reward": {
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)"
}
},
"_last_state": {}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
#All tests after this involve the /analytics/canary/epsilon_t_greedy endpoint for A/B experiments
def test_payload_ab_epsilon_t_greedy(self):
"""Tests the REST endpoint /analytics/ab/epsilon_t_greedy."""
endpoint = f'http://localhost:5555/api/v1/analytics/canary/epsilon_t_greedy'
with requests_mock.mock() as m:
m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_sample_response.json")))
###################
# Test request with some required parameters
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with some required parameters")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "delta",
"value": 0.02,
"sample_size": 0,
"stop_on_failure": False
}
],
"reward": {
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)"
}
},
"_last_state": {}
}
#Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
#All tests after this involve the /analytics/canary/posterior_bayesian_routing endpoint for A/B experiments
def test_payload_ab_posterior_bayesian_routing(self):
"""Tests the REST endpoint /analytics/ab/posterior_bayesian_routing."""
endpoint = f'http://localhost:5555/api/v1/analytics/canary/posterior_bayesian_routing'
with requests_mock.mock() as m:
m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_sample_response.json")))
###################
# Test request with some required parameters
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with some required parameters")
parameters = {
"baseline": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"confidence": 0.9,
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0",
"min_max": {
"min": 0,
"max": 1
},
"metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 0.02,
"stop_on_failure": False
}
],
"reward": {
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)"
}
},
"_last_state": {}
}
#Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
#All tests after this involve the /analytics/canary/optimistic_bayesian_routing endpoint for A/B experiments
def test_payload_ab_optimistic_bayesian_routing(self):
"""Tests the REST endpoint /analytics/ab/optimistic_bayesian_routing."""
endpoint = f'http://localhost:5555/api/v1/analytics/canary/optimistic_bayesian_routing'
with requests_mock.mock() as m:
m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_sample_response.json")))
###################
# Test request with some required parameters
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with some required parameters")
parameters = {
"baseline": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-05-01T19:00:02.389Z",
"tags": {
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"confidence": 0.9,
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0",
"min_max": {
"min": 0,
"max": 1
},
"metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 0.02,
"stop_on_failure": False
}
],
"reward": {
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)"
}
},
"_last_state": {}
}
#Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
#############################################################
##The next tests use the Experiment namespace which will be retained after a controller fix
#############################################################
##All tests after this involve the /experiment/check_and_increment endpoint (until mentioned otherwise)
def test_payload_experiment_canary_check_and_increment(self):
"""Tests the REST endpoint /experiment/check_and_increment."""
endpoint = f'http://localhost:5555/api/v1/experiment/check_and_increment'
with requests_mock.mock() as m:
m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_sample_response.json")))
###################
# Test request with some required parameters
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with some required parameters")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "delta",
"value": 0.02,
"sample_size": 0,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
###################
# Test request with no change observed wrt sample size or metric parameters
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with no change observed wrt sample size or metric parameters")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "delta",
"value": 0.02,
"sample_size": 0,
"stop_on_failure": False
}
]
},
"_last_state": {
"baseline": {
"traffic_percentage": 75,
"success_criterion_information": [
[0,0]]
},
"candidate": {
"traffic_percentage": 25,
"success_criterion_information": [
[0,0]]
}
}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
correct_response = {"baseline":{"traffic_percentage":73.0,"success_criterion_information":[[21,21.764]]},"candidate":{"traffic_percentage":27,"success_criterion_information":[[19,19.677]]}}
self.assertEqual(resp.status_code, 200, resp.data)
self.assertEqual(resp.get_json()["_last_state"], correct_response)
##################
# Test request with start_time missing in payload
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with start_time missing in payload")
parameters = {
"baseline": {
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_count",
"is_counter": True,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "delta",
"value": 0.02,
"sample_size": 0,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
# We should get a BAD REQUEST HTTP error
self.assertEqual(resp.status_code, 400, 'Missing start_time parameter')
assert b'\'start_time\' is a required property' in resp.data
##################
# Test request with success_criteria missing in payload
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with success_criteria missing in payload")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
},
"_last_state": {}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
# We should get a BAD REQUEST HTTP error
self.assertEqual(resp.status_code, 400, 'Missing success_criteria missing in payload')
assert b'\'success_criteria\' is a required property' in resp.data
###################
# Test request with baseline missing in payload
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with baseline missing in payload")
parameters = {
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_count",
"is_counter": True,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "delta",
"value": 0.02,
"sample_size": 0,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
# We should get a BAD REQUEST HTTP error
self.assertEqual(resp.status_code, 400, 'Baseline missing in payload')
assert b'\'baseline\' is a required property' in resp.data
###################
# Test request with missing value in success_criteria
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with missing value in success_criteria")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_count",
"is_counter": True,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "delta",
"sample_size": 0,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 400, 'Missing value in success_criteria')
assert b'\'value\' is a required property' in resp.data
###################
# Test request with Unknown type in is_counter
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with Unknown type in is_counter")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_count",
"is_counter": "No",
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "normal",
"value": 0.02,
"sample_size": 0,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 400, 'Unknown type in is_counter')
assert b"\'No\' is not of type \'boolean\'" in resp.data
assert b'\'normal\' is not one of [\'delta\', \'threshold\']' in resp.data
##################
# Test request with is_counter missing in payload
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with is_counter missing in payload")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_count",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 0.02,
"sample_size": 0,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
# We should get a BAD REQUEST HTTP error
self.assertEqual(resp.status_code, 400, 'is_counter missing in payload')
assert b'\'is_counter\' is a required property' in resp.data
#assert b'\'absent_value\' is a required property' in resp.data
##################
# Test request with absent value of type float in payload
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with absent value of type float in payload")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_count",
"is_counter": True,
"absent_value": 0,
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 0.02,
"sample_size": 0,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
# We should get a BAD REQUEST HTTP error
self.assertEqual(resp.status_code, 400, 'new absent_value type in payload')
assert b'0 is not of type \'string\'' in resp.data
##################
# Test request with metric_query_template missing in payload
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with metric_query_template missing in payload")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_count",
"is_counter": True,
"absent_value": "0.0",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 0.02,
"sample_size": 0,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
# We should get a BAD REQUEST HTTP error
self.assertEqual(resp.status_code, 400, 'metric_query_template missing in payload')
assert b'\'metric_query_template\' is a required property' in resp.data
##################
# Test request with metric_sample_size_query_template missing in payload
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with metric_sample_size_query_template missing in payload")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_count",
"is_counter": True,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 0.02,
"sample_size": 0,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
# We should get a BAD REQUEST HTTP error
self.assertEqual(resp.status_code, 400, 'metric_query_template missing in payload')
assert b'\'metric_sample_size_query_template\' is a required property' in resp.data
##################
# Test request threshold crossing in a counter metric
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request threshold crossing in a counter metric")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_count",
"is_counter": True,
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 18,
"sample_size": 0,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
assert resp.get_json()["assessment"]["summary"]["abort_experiment"]
assert not resp.get_json()["assessment"]["summary"]["all_success_criteria_met"]
##################
# Test request delta criterion with counter metric
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request delta criterion with counter metric")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_count",
"is_counter": True,
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "delta",
"value": 0.5,
"sample_size": 0,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
assert b'Delta criterion cannot be used with counter metric.' in resp.data
def test_experiment_baseline_failing_success_criteria(self):
"""Tests the REST endpoint /experiment/check_and_increment."""
endpoint = f'http://localhost:5555/api/v1/experiment/check_and_increment'
with requests_mock.mock() as m:
m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_baseline_failing_response.json")))
###################
# Test request when both candidate and baseline fail success criteria
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request when both candidate and baseline fail success criteria")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 2,
"sample_size": 2,
"stop_on_failure": False
},
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "delta",
"value": 0.1,
"sample_size": 2,
"stop_on_failure": True
}
]
},
"_last_state": {
"baseline": {
"traffic_percentage":98.0,
"success_criterion_information":[[1,0.0]]
},
"candidate": {
"traffic_percentage":2,
"success_criterion_information":[[1,0.0]]
}
}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
assert 'The baseline version did not meet success criteria' in resp.get_json()["assessment"]["summary"]["conclusions"]
def test_experiment_no_data_from_prometheus(self):
"""Tests the REST endpoint /experiment/check_and_increment."""
endpoint = f'http://localhost:5555/api/v1/experiment/check_and_increment'
with requests_mock.mock() as m:
m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_no_data_response.json")))
###################
# Test request with no data from prometheus- iter8_error_count
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with no data from prometheus")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "delta",
"value": 0.02,
"sample_size": 0,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
###################
# Test request with no data from prometheus- iter8_latency
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with no data from prometheus")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_latency",
"is_counter": False,
"absent_value": "None",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "delta",
"value": 0.02,
"sample_size": 0,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
#All tests after this involve the /experiment/epsilon_t_greedy endpoint
def test_experiment_payload_canary_epsilon_t_greedy(self):
"""Tests the REST endpoint /experiment/epsilon_t_greedy."""
endpoint = f'http://localhost:5555/api/v1/experiment/epsilon_t_greedy'
with requests_mock.mock() as m:
m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_sample_response.json")))
###################
# Test request with some required parameters
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with some required parameters")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "delta",
"value": 0.02,
"sample_size": 0,
"stop_on_failure": False
}
]
},
"_last_state": {}
}
#Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
##################
# Test request with pre filled last state in payload on iteration 4
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with pre filled last state in payload on iteration 4")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"end_time": "2019-04-24T20:30:02.389Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"end_time": "2019-04-24T20:30:02.389Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"max_traffic_percent": 100,
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "delta",
"value": 0.02,
"sample_size": 0,
"stop_on_failure": False
}
]
},
"_last_state": {
"baseline": {
"traffic_percentage": 13,
"success_criterion_information": [
[
1,
2
]
]
},
"candidate": {
"traffic_percentage": 87,
"success_criterion_information": [
[
2,
3
]
]
},
"effective_iteration_count": 4
}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
correct_response = {"baseline":{"traffic_percentage":10.0,"success_criterion_information":[[21,21.764]]},"candidate":{"traffic_percentage":90,"success_criterion_information":[[19,19.677]]},"effective_iteration_count":5}
self.assertEqual(resp.get_json()["_last_state"], correct_response)
##################
# Test request with pre filled last state in payload on iteration 5- when no change is observed
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request with pre filled last state in payload on iteration 5- when no change is observed")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_rate",
"is_counter": False,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "delta",
"value": 0.02,
"sample_size": 0,
"stop_on_failure": False
}
]
},
"_last_state": {
"baseline": {
"traffic_percentage": 10,
"success_criterion_information": [
[21,21.764]
]
},
"candidate": {
"traffic_percentage": 90,
"success_criterion_information": [
[19,19.677]
]
},
"effective_iteration_count": 5
}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
correct_response = {"baseline":{"traffic_percentage":10.0,"success_criterion_information":[[21,21.764]]},"candidate":{"traffic_percentage":90,"success_criterion_information":[[19,19.677]]},"effective_iteration_count":5}
self.assertEqual(resp.get_json()["_last_state"], correct_response)
# Test request when candidate fails success criteria
def test_experiment_candidate_failing_success_criteria(self):
"""Tests the REST endpoint /experiment/epsilon_t_greedy."""
endpoint = f'http://localhost:5555/api/v1/experiment/epsilon_t_greedy'
with requests_mock.mock() as m:
m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_baseline_failing_response.json")))
###################
# Test request when both candidate and baseline fail success criteria
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request when both candidate and baseline fail success criteria")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_count",
"is_counter": True,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 2,
"sample_size": 1,
"stop_on_failure": False
}
]
},
"_last_state": {
"baseline": {
"traffic_percentage": 90,
"success_criterion_information": [
[1,0.0]
]
},
"candidate": {
"traffic_percentage": 10,
"success_criterion_information": [
[1,0.0]
]
},
"effective_iteration_count": 3
}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
assert 'The baseline version did not meet success criteria' in resp.get_json()["assessment"]["summary"]["conclusions"]
###################
# Test request when candidate fails success criteria because sample size requirements are not met
###################
log.info("\n\n\n")
log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
log.info("Test request when candidate fails success criteria because sample size requirements are not met")
parameters = {
"baseline": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v1"
}
},
"candidate": {
"start_time": "2019-04-24T19:40:32.017Z",
"tags": {
"destination_service_namespace": "default",
"destination_workload": "reviews-v3"
}
},
"traffic_control": {
"success_criteria": [
{
"metric_name": "iter8_error_count",
"is_counter": True,
"absent_value": "0.0",
"metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
"type": "threshold",
"value": 2,
"sample_size": 10,
"stop_on_failure": False
}
]
},
"_last_state": {
"baseline": {
"traffic_percentage": 90,
"success_criterion_information": [
[1,0.0]
]
},
"candidate": {
"traffic_percentage": 10,
"success_criterion_information": [
[1,0.0]
]
},
"effective_iteration_count": 3
}
}
# Call the REST API via the test client
resp = self.flask_test.post(endpoint, json=parameters)
self.assertEqual(resp.status_code, 200, resp.data)
correct_response = {'baseline': {'traffic_percentage': 90.0, 'success_criterion_information': [[4, 4.0]]}, 'candidate': {'traffic_percentage': 10, 'success_criterion_information': [[5, 5.0]]}, 'effective_iteration_count': 3}
self.assertEqual(correct_response, resp.get_json()["_last_state"])
#All tests after this involve the /experiment/posterior_bayesian_routing endpoint
def test_experiment_payload_canary_posterior_bayesian_routing(self):
    """Tests the REST endpoint /experiment/posterior_bayesian_routing.

    Exercises three request scenarios against a mocked Prometheus backend:
    1. a payload carrying the optional fields (confidence, absent_value, min_max);
    2. a minimal payload, relying on server-side defaults for the omitted fields;
    3. stop_on_failure=True with an unmeetable threshold, which must yield the
       "abort" conclusions in the assessment summary.
    """
    endpoint = f'http://localhost:5555/api/v1/experiment/posterior_bayesian_routing'
    with requests_mock.mock() as m:
        # Every Prometheus query issued by the service is answered with this
        # canned sample response for the duration of the test.
        m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_sample_response.json")))
        ###################
        # Test request with some required parameters
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
        log.info("Test request with some required parameters")
        parameters = {
            "baseline": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "confidence": 0.9,
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0",
                        "min_max": {
                            "min": 0,
                            "max": 1
                        },
                        "metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 0.02,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {
            }
        }
        #Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
        ###################
        # Test request to check for default parameters
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
        log.info("Test request to check for default parameters")
        # Same payload minus confidence / absent_value / min_max: the service
        # must accept it and fill in its own defaults.
        parameters = {
            "baseline": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 0.02
                    }
                ]
            },
            "_last_state": {
            }
        }
        #Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
        ###################
        # Test request to with stop on failure=True
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
        log.info("Test request to with stop on failure=True")
        parameters = {
            "baseline": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        # Threshold of 19 is not satisfied by the canned data, so
                        # stop_on_failure=True must trigger the abort conclusions.
                        "value": 19,
                        "stop_on_failure": True
                    }
                ]
            },
            "_last_state": {
            }
        }
        #Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        # Default confidence (0.95) appears in the conclusion text because the
        # payload above does not set one.
        correct_response = ["The experiment needs to be aborted", "All success criteria were not met", "Required confidence of 0.95 was not reached"]
        self.assertEqual(correct_response, resp.get_json()["assessment"]["summary"]["conclusions"])
        self.assertEqual(resp.status_code, 200, resp.data)
def test_experiment_payload_canary_bayesian_routing_high_sample_size(self):
    """Tests the REST endpoint /experiment/posterior_bayesian_routing.

    Uses the "br" Prometheus sample (high request counts) with a very loose
    threshold (200000) and a low confidence requirement (0.5), so the success
    conclusions must be reached — first with a single criterion, then with two.
    """
    endpoint = f'http://localhost:5555/api/v1/experiment/posterior_bayesian_routing'
    with requests_mock.mock() as m:
        m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_sample_response_br.json")))
        ###################
        # Test request with high sample size for high confidence results
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
        log.info("Test request with high sample size for high confidence results")
        parameters = {
            "baseline": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "confidence": 0.5,
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0",
                        "min_max": {
                            "min": 0,
                            "max": 1
                        },
                        "metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        # Deliberately huge threshold so the criterion always passes.
                        "value": 200000,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {
            }
        }
        #Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
        correct_response = ["All success criteria were met", "Required confidence of 0.5 was reached"]
        self.assertEqual(correct_response, resp.get_json()["assessment"]["summary"]["conclusions"])
        ###################
        # Test request with high sample size for high confidence results + multiple metrics
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
        log.info("Test request with high sample size for high confidence results + multiple metrics")
        parameters = {
            "baseline": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "confidence": 0.5,
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0",
                        "min_max": {
                            "min": 0,
                            "max": 1
                        },
                        "metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 200000,
                        "stop_on_failure": False
                    },
                    {
                        # Second criterion on a counter metric; identical queries
                        # and loose threshold, so both criteria must pass.
                        "metric_name": "iter8_error_count",
                        "is_counter": True,
                        "absent_value": "0",
                        "min_max": {
                            "min": 0,
                            "max": 1
                        },
                        "metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 200000,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {
            }
        }
        #Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
        correct_response = ["All success criteria were met", "Required confidence of 0.5 was reached"]
        self.assertEqual(correct_response, resp.get_json()["assessment"]["summary"]["conclusions"])
#All tests after this involve the /experiment/optimistic_bayesian_routing endpoint
def test_experiment_payload_canary_optimistic_bayesian_routing(self):
    """Tests the REST endpoint /experiment/optimistic_bayesian_routing.

    Exercises four request scenarios against a mocked Prometheus backend:
    1. a payload carrying the optional fields (confidence, absent_value, min_max);
    2. a minimal payload, relying on server-side defaults for the omitted fields;
    3. stop_on_failure=True with an unmeetable threshold, which must yield the
       "abort" conclusions in the assessment summary;
    4. a payload with an empty last state and a criterion that has no min_max.
    """
    # Plain string (no interpolation needed), unlike the original f-string
    # which had no placeholders.
    endpoint = 'http://localhost:5555/api/v1/experiment/optimistic_bayesian_routing'
    with requests_mock.mock() as m:
        # Every Prometheus query issued by the service is answered with this
        # canned sample response for the duration of the test.
        m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_sample_response.json")))
        ###################
        # Test request with some required parameters
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
        log.info("Test request with some required parameters")
        parameters = {
            "baseline": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "confidence": 0.9,
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0",
                        "min_max": {
                            "min": 0,
                            "max": 1
                        },
                        "metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 0.02,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {
            }
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
        ###################
        # Test request to check for default parameters
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
        log.info("Test request to check for default parameters")
        # Same payload minus confidence / absent_value / min_max: the service
        # must accept it and fill in its own defaults.
        parameters = {
            "baseline": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 0.02
                    }
                ]
            },
            "_last_state": {
            }
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
        ###################
        # Test request to with stop on failure=True
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
        log.info("Test request to with stop on failure=True")
        parameters = {
            "baseline": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        # Threshold of 19 is not satisfied by the canned data, so
                        # stop_on_failure=True must trigger the abort conclusions.
                        "value": 19,
                        "stop_on_failure": True
                    }
                ]
            },
            "_last_state": {
            }
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        # Default confidence (0.95) appears in the conclusion text because the
        # payload above does not set one.
        correct_response = ["The experiment needs to be aborted", "All success criteria were not met", "Required confidence of 0.95 was not reached"]
        self.assertEqual(correct_response, resp.get_json()["assessment"]["summary"]["conclusions"])
        self.assertEqual(resp.status_code, 200, resp.data)
        ###################
        # Test request payload with no last state and no min max value
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
        # Fixed copy-pasted log text: this section tests an empty last state
        # and a criterion without min_max, not stop_on_failure.
        log.info("Test request payload with no last state and no min max value")
        # (Removed an unused `params = namedtuple(...)` and a redundant
        # re-registration of the same mocked Prometheus response here.)
        parameters = {
            "name":"reviews-e479be4",
            "baseline":{"start_time":"2020-03-30T14:33:38Z","end_time":"2020-03-30T14:33:38Z","tags":{"destination_service_namespace":"br","destination_workload":"reviews-509c700"}},
            "candidate":{"start_time":"2020-03-30T14:33:38Z","end_time":"2020-03-30T14:33:38Z","tags":{"destination_service_namespace":"br","destination_workload":"reviews-e479be4"}},
            "_last_state":{},
            "traffic_control":{
                "confidence":0.98,
                "max_traffic_percent":95,
                "success_criteria":[
                    {
                        "absent_value":"None",
                        "is_counter":False,
                        "metric_name":"iter8_latency",
                        "metric_query_template":"(sum(increase(istio_request_duration_seconds_sum{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)) / (sum(increase(istio_request_duration_seconds_count{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels))",
                        "metric_sample_size_query_template":"sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "stop_on_failure":False,
                        "type":"threshold",
                        "value":0.2
                    }
                ]
            }
        }
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
def test_experiment_payload_canary_optimistic_bayesian_routing_high_sample_size(self):
    """Tests the REST endpoint /experiment/optimistic_bayesian_routing.

    Uses the "br" Prometheus sample (high request counts) with a very loose
    threshold (200000) and a low confidence requirement (0.5), so the success
    conclusions must be reached — first with a single criterion, then with two.
    """
    endpoint = f'http://localhost:5555/api/v1/experiment/optimistic_bayesian_routing'
    with requests_mock.mock() as m:
        m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_sample_response_br.json")))
        ###################
        # Test request with high sample size for high confidence results
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
        log.info("Test request with high sample size for high confidence results")
        parameters = {
            "baseline": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "confidence": 0.5,
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0",
                        "min_max": {
                            "min": 0,
                            "max": 1
                        },
                        "metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        # Deliberately huge threshold so the criterion always passes.
                        "value": 200000,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {
            }
        }
        #Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
        correct_response = ["All success criteria were met", "Required confidence of 0.5 was reached"]
        self.assertEqual(correct_response, resp.get_json()["assessment"]["summary"]["conclusions"])
        ###################
        # Test request with high sample size for high confidence results + multiple metrics
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
        log.info("Test request with high sample size for high confidence results + multiple metrics")
        parameters = {
            "baseline": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "confidence": 0.5,
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0",
                        "min_max": {
                            "min": 0,
                            "max": 1
                        },
                        "metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 200000,
                        "stop_on_failure": False
                    },
                    {
                        # Second criterion on a counter metric; identical queries
                        # and loose threshold, so both criteria must pass.
                        "metric_name": "iter8_error_count",
                        "is_counter": True,
                        "absent_value": "0",
                        "min_max": {
                            "min": 0,
                            "max": 1
                        },
                        "metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 200000,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {
            }
        }
        #Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
        correct_response = ["All success criteria were met", "Required confidence of 0.5 was reached"]
        self.assertEqual(correct_response, resp.get_json()["assessment"]["summary"]["conclusions"])
def test_experiment_no_data_canary_optimistic_bayesian_routing_high_sample_size(self):
    """Tests the REST endpoint /experiment/optimistic_bayesian_routing.

    All three scenarios run against an empty Prometheus response ("no data"):
    1. first iteration (empty _last_state);
    2. later iteration with beta-belief state carried over and min_max present;
    3. later iteration with gaussian-style belief state and no min_max.
    Each must still return HTTP 200.
    """
    endpoint = f'http://localhost:5555/api/v1/experiment/optimistic_bayesian_routing'
    with requests_mock.mock() as m:
        # Every metrics query returns the canned "no data" Prometheus response.
        m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_no_data_response.json")))
        ###################
        # Test request with no data for obr (first iteration)
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
        log.info("Test request with no data for obr (first iteration)")
        parameters = {
            "baseline": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "confidence": 0.5,
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0",
                        "min_max": {
                            "min": 0,
                            "max": 1
                        },
                        "metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 200000,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {}
        }
        #Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
        ###################
        # Test request with no data for obr (not first iteration + min-max available)
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
        log.info("Test request with no data for obr (not first iteration + min-max available)")
        # Lightweight stand-in for the service's belief-state tuples.
        # NOTE(review): namedtuple instances are serialized as JSON arrays by the
        # test client — presumably the server accepts the list form; verify.
        params = namedtuple('params', 'alpha beta gamma sigma')
        parameters = {
            "baseline": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "confidence": 0.5,
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "None",
                        "min_max": {
                            "min": 0,
                            "max": 1
                        },
                        "metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 200000,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {
                # alpha/beta set, gamma/sigma absent — matches the min_max case.
                "baseline": {
                    "success_criterion_belief": [params(1, 1, None, None)],
                    "reward_belief": params(None, None, None, None)
                },
                "candidate": {
                    "success_criterion_belief": [params(1, 1, None, None)],
                    "reward_belief": params(None, None, None, None)
                }
            }
        }
        #Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
        ###################
        # Test request with no data for obr (not first iteration + min-max not available)
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
        log.info("Test request with no data for obr (not first iteration + min-max not available)")
        # Redundant re-definition of the same namedtuple; kept byte-identical.
        params = namedtuple('params', 'alpha beta gamma sigma')
        parameters = {
            "baseline": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "confidence": 0.5,
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "None",
                        "metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 200000,
                        "stop_on_failure": False
                    }
                ]
            },
            "_last_state": {
                # gamma/sigma set, alpha/beta absent — matches the no-min_max case.
                "baseline": {
                    "success_criterion_belief": [params(None, None, 0, 1)],
                    "reward_belief": params(None, None, None, None)
                },
                "candidate": {
                    "success_criterion_belief": [params(None, None, 0, 1)],
                    "reward_belief": params(None, None, None, None)
                }
            }
        }
        #Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
##All tests after this involve the /experiment/check_and_increment endpoint for A/B experiments
def test_experiment_payload_ab_check_and_increment(self):
    """Tests the REST endpoint /experiment/check_and_increment.

    A/B variant: the traffic_control section additionally carries a "reward"
    metric alongside the success criteria. Only checks for HTTP 200.
    """
    endpoint = f'http://localhost:5555/api/v1/experiment/check_and_increment'
    with requests_mock.mock() as m:
        m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_sample_response.json")))
        ###################
        # Test request with some required parameters
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
        log.info("Test request with some required parameters")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0.0",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "delta",
                        "value": 0.02,
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ],
                # Reward metric distinguishes the A/B payload from the canary one.
                "reward": {
                    "metric_name": "iter8_error_rate",
                    "is_counter": False,
                    "absent_value": "0.0",
                    "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                    "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)"
                }
            },
            "_last_state": {}
        }
        # Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
#All tests after this involve the /experiment/epsilon_t_greedy endpoint for A/B experiments
def test_experiment_payload_ab_epsilon_t_greedy(self):
    """Tests the REST endpoint /experiment/epsilon_t_greedy.

    A/B variant: the traffic_control section additionally carries a "reward"
    metric alongside the success criteria. Only checks for HTTP 200.
    """
    endpoint = f'http://localhost:5555/api/v1/experiment/epsilon_t_greedy'
    with requests_mock.mock() as m:
        m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_sample_response.json")))
        ###################
        # Test request with some required parameters
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
        log.info("Test request with some required parameters")
        parameters = {
            "baseline": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-04-24T19:40:32.017Z",
                "tags": {
                    "destination_service_namespace": "default",
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0.0",
                        "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "delta",
                        "value": 0.02,
                        "sample_size": 0,
                        "stop_on_failure": False
                    }
                ],
                # Reward metric distinguishes the A/B payload from the canary one.
                "reward": {
                    "metric_name": "iter8_error_rate",
                    "is_counter": False,
                    "absent_value": "0.0",
                    "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                    "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)"
                }
            },
            "_last_state": {}
        }
        #Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
#All tests after this involve the /experiment/posterior_bayesian_routing endpoint for A/B experiments
def test_experiment_payload_ab_posterior_bayesian_routing(self):
    """Tests the REST endpoint /experiment/posterior_bayesian_routing.

    A/B variant: the traffic_control section additionally carries a "reward"
    metric alongside the success criteria. Only checks for HTTP 200.
    """
    endpoint = f'http://localhost:5555/api/v1/experiment/posterior_bayesian_routing'
    with requests_mock.mock() as m:
        m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_sample_response.json")))
        ###################
        # Test request with some required parameters
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
        log.info("Test request with some required parameters")
        parameters = {
            "baseline": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "confidence": 0.9,
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0",
                        "min_max": {
                            "min": 0,
                            "max": 1
                        },
                        "metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 0.02,
                        "stop_on_failure": False
                    }
                ],
                # Reward metric distinguishes the A/B payload from the canary one.
                "reward": {
                    "metric_name": "iter8_error_rate",
                    "is_counter": False,
                    "absent_value": "0.0",
                    "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                    "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)"
                }
            },
            "_last_state": {
            }
        }
        #Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
#All tests after this involve the /experiment/optimistic_bayesian_routing endpoint for A/B experiments
def test_experiment_payload_ab_optimistic_bayesian_routing(self):
    """Tests the REST endpoint /experiment/optimistic_bayesian_routing.

    A/B variant: the traffic_control section additionally carries a "reward"
    metric alongside the success criteria. Only checks for HTTP 200.
    """
    endpoint = f'http://localhost:5555/api/v1/experiment/optimistic_bayesian_routing'
    with requests_mock.mock() as m:
        m.get(self.metrics_endpoint, json=json.load(open("tests/data/prometheus_sample_response.json")))
        ###################
        # Test request with some required parameters
        ###################
        log.info("\n\n\n")
        log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
        log.info("Test request with some required parameters")
        parameters = {
            "baseline": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v1"
                }
            },
            "candidate": {
                "start_time": "2019-05-01T19:00:02.389Z",
                "tags": {
                    "destination_workload": "reviews-v3"
                }
            },
            "traffic_control": {
                "confidence": 0.9,
                "success_criteria": [
                    {
                        "metric_name": "iter8_error_rate",
                        "is_counter": False,
                        "absent_value": "0",
                        "min_max": {
                            "min": 0,
                            "max": 1
                        },
                        "metric_query_template": "sum(increase(istio_requests_total{response_code=~\"5..\",reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)",
                        "type": "threshold",
                        "value": 0.02,
                        "stop_on_failure": False
                    }
                ],
                # Reward metric distinguishes the A/B payload from the canary one.
                "reward": {
                    "metric_name": "iter8_error_rate",
                    "is_counter": False,
                    "absent_value": "0.0",
                    "metric_query_template": "sum(increase(istio_requests_total{source_workload_namespace!='knative-serving',response_code=~'5..',reporter='source'}[$interval]$offset_str)) by ($entity_labels)",
                    "metric_sample_size_query_template": "sum(increase(istio_requests_total{reporter=\"source\"}[$interval]$offset_str)) by ($entity_labels)"
                }
            },
            "_last_state": {
            }
        }
        #Call the REST API via the test client
        resp = self.flask_test.post(endpoint, json=parameters)
        self.assertEqual(resp.status_code, 200, resp.data)
##All tests after this involve the /experiment/algorithms endpoint (until mentioned otherwise)
def test_payload_algorithms(self):
    """Tests the REST endpoint /experiment/algorithms.

    GETs the algorithm catalogue and verifies that all four routing
    algorithms are advertised with their expected endpoint paths.
    """
    endpoint = f'http://localhost:5555/api/v1/experiment/algorithms'
    log.info("\n\n\n")
    log.info('===TESTING ENDPOINT {endpoint}'.format(endpoint=endpoint))
    log.info("Test algorithms endpoint")
    # Issue the GET through the Flask test client.
    response = self.flask_test.get(endpoint)
    # Each advertised algorithm maps to "/experiment/<algorithm name>".
    expected_catalogue = {
        name: {"endpoint": "/experiment/" + name}
        for name in (
            "check_and_increment",
            "epsilon_t_greedy",
            "posterior_bayesian_routing",
            "optimistic_bayesian_routing",
        )
    }
    self.assertEqual(response.status_code, 200, response.data)
    self.assertEqual(expected_catalogue, response.get_json())
| 47.156513
| 364
| 0.470422
| 16,699
| 195,841
| 5.278639
| 0.018145
| 0.018741
| 0.031765
| 0.055588
| 0.983153
| 0.980544
| 0.977992
| 0.976245
| 0.975065
| 0.972921
| 0
| 0.036137
| 0.403562
| 195,841
| 4,153
| 365
| 47.156513
| 0.71851
| 0.059941
| 0
| 0.734615
| 0
| 0.028994
| 0.388394
| 0.184251
| 0
| 0
| 0
| 0
| 0.036982
| 1
| 0.008876
| false
| 0
| 0.005621
| 0
| 0.014793
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c38427e09951f3d8f85339288894d5d7d74d0ccd
| 94
|
py
|
Python
|
simulaqron/start/__init__.py
|
WrathfulSpatula/SimulaQron
|
eaa5548df2f992e187ee70ccd81f192a1ce93e14
|
[
"BSD-3-Clause"
] | 25
|
2017-11-20T08:50:12.000Z
|
2018-07-31T19:02:19.000Z
|
simulaqron/start/__init__.py
|
WrathfulSpatula/SimulaQron
|
eaa5548df2f992e187ee70ccd81f192a1ce93e14
|
[
"BSD-3-Clause"
] | 23
|
2017-11-21T21:47:28.000Z
|
2018-10-03T08:28:41.000Z
|
simulaqron/start/__init__.py
|
WrathfulSpatula/SimulaQron
|
eaa5548df2f992e187ee70ccd81f192a1ce93e14
|
[
"BSD-3-Clause"
] | 13
|
2017-11-20T08:50:14.000Z
|
2018-09-01T21:44:00.000Z
|
from .start_vnode import main as start_vnode
from .start_qnodeos import main as start_qnodeos
| 31.333333
| 48
| 0.851064
| 16
| 94
| 4.75
| 0.4375
| 0.236842
| 0.315789
| 0.447368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12766
| 94
| 2
| 49
| 47
| 0.926829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
c38ffd51dc2a0fe0075d605f65222930730702a3
| 68,173
|
py
|
Python
|
py-rest-client/product_catalogue_py_rest_client/api/products_l0_dist_api.py
|
ausseabed/product-catalogue
|
3deca0301298efe22d7b61028683254ac3bd3c66
|
[
"Apache-2.0"
] | null | null | null |
py-rest-client/product_catalogue_py_rest_client/api/products_l0_dist_api.py
|
ausseabed/product-catalogue
|
3deca0301298efe22d7b61028683254ac3bd3c66
|
[
"Apache-2.0"
] | 4
|
2022-02-13T10:48:44.000Z
|
2022-03-02T21:22:04.000Z
|
py-rest-client/product_catalogue_py_rest_client/api/products_l0_dist_api.py
|
ausseabed/product-catalogue
|
3deca0301298efe22d7b61028683254ac3bd3c66
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
AusSeabed product catalogue
The API description for the Ausseabed product catalogue inventory # noqa: E501
The version of the OpenAPI document: 0.2.2
Contact: AusSeabed@ga.gov.au
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from product_catalogue_py_rest_client.api_client import ApiClient
from product_catalogue_py_rest_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class ProductsL0DistApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def products_l0_dist_controller_create(self, product_l0_src_id, product_l0_dist_dto, **kwargs):  # noqa: E501
    """products_l0_dist_controller_create  # noqa: E501

    Convenience wrapper that returns only the deserialized response body.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.products_l0_dist_controller_create(product_l0_src_id, product_l0_dist_dto, async_req=True)
    >>> result = thread.get()

    :param product_l0_src_id: (required)
    :type product_l0_src_id: float
    :param product_l0_dist_dto: (required)
    :type product_l0_dist_dto: ProductL0DistDto
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: ProductL0Dist
    """
    # Force body-only responses, overriding any caller-supplied value.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.products_l0_dist_controller_create_with_http_info(
        product_l0_src_id, product_l0_dist_dto, **forwarded)  # noqa: E501
def products_l0_dist_controller_create_with_http_info(self, product_l0_src_id, product_l0_dist_dto, **kwargs):  # noqa: E501
    """products_l0_dist_controller_create  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.products_l0_dist_controller_create_with_http_info(product_l0_src_id, product_l0_dist_dto, async_req=True)
    >>> result = thread.get()

    :param product_l0_src_id: (required)
    :type product_l0_src_id: float
    :param product_l0_dist_dto: (required)
    :type product_l0_dist_dto: ProductL0DistDto
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(ProductL0Dist, status_code(int), headers(HTTPHeaderDict))
    """
    # Capture the call arguments first, before any other local is bound.
    local_var_params = locals()
    all_params = [
        'product_l0_src_id',
        'product_l0_dist_dto'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )
    # Reject keyword arguments not declared in the generated API spec.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method products_l0_dist_controller_create" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'product_l0_src_id' is set
    if self.api_client.client_side_validation and ('product_l0_src_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['product_l0_src_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `product_l0_src_id` when calling `products_l0_dist_controller_create`")  # noqa: E501
    # verify the required parameter 'product_l0_dist_dto' is set
    if self.api_client.client_side_validation and ('product_l0_dist_dto' not in local_var_params or  # noqa: E501
                                                   local_var_params['product_l0_dist_dto'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `product_l0_dist_dto` when calling `products_l0_dist_controller_create`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    # product_l0_src_id travels as the `productL0SrcId` query parameter.
    query_params = []
    if 'product_l0_src_id' in local_var_params and local_var_params['product_l0_src_id'] is not None:  # noqa: E501
        query_params.append(('productL0SrcId', local_var_params['product_l0_src_id']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # The DTO is serialized as the JSON request body.
    body_params = None
    if 'product_l0_dist_dto' in local_var_params:
        body_params = local_var_params['product_l0_dist_dto']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['access-token']  # noqa: E501

    # Delegate the actual HTTP round-trip to the shared ApiClient.
    return self.api_client.call_api(
        '/products/l0-dist', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ProductL0Dist',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def products_l0_dist_controller_create_instrument(self, product_id, product_l0_instrument_file_dto, **kwargs):  # noqa: E501
    """products_l0_dist_controller_create_instrument  # noqa: E501

    Convenience wrapper that returns only the deserialized response body.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.products_l0_dist_controller_create_instrument(product_id, product_l0_instrument_file_dto, async_req=True)
    >>> result = thread.get()

    :param product_id: (required)
    :type product_id: float
    :param product_l0_instrument_file_dto: (required)
    :type product_l0_instrument_file_dto: ProductL0InstrumentFileDto
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: ProductL0InstrumentFile
    """
    # Force body-only responses, overriding any caller-supplied value.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.products_l0_dist_controller_create_instrument_with_http_info(
        product_id, product_l0_instrument_file_dto, **forwarded)  # noqa: E501
def products_l0_dist_controller_create_instrument_with_http_info(self, product_id, product_l0_instrument_file_dto, **kwargs):  # noqa: E501
    """products_l0_dist_controller_create_instrument  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.products_l0_dist_controller_create_instrument_with_http_info(product_id, product_l0_instrument_file_dto, async_req=True)
    >>> result = thread.get()

    :param product_id: (required)
    :type product_id: float
    :param product_l0_instrument_file_dto: (required)
    :type product_l0_instrument_file_dto: ProductL0InstrumentFileDto
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(ProductL0InstrumentFile, status_code(int), headers(HTTPHeaderDict))
    """
    # Capture the call arguments first, before any other local is bound.
    local_var_params = locals()
    all_params = [
        'product_id',
        'product_l0_instrument_file_dto'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )
    # Reject keyword arguments not declared in the generated API spec.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method products_l0_dist_controller_create_instrument" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'product_id' is set
    if self.api_client.client_side_validation and ('product_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['product_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `product_id` when calling `products_l0_dist_controller_create_instrument`")  # noqa: E501
    # verify the required parameter 'product_l0_instrument_file_dto' is set
    if self.api_client.client_side_validation and ('product_l0_instrument_file_dto' not in local_var_params or  # noqa: E501
                                                   local_var_params['product_l0_instrument_file_dto'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `product_l0_instrument_file_dto` when calling `products_l0_dist_controller_create_instrument`")  # noqa: E501

    collection_formats = {}

    # product_id is substituted into the {productId} path template.
    path_params = {}
    if 'product_id' in local_var_params:
        path_params['productId'] = local_var_params['product_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The DTO is serialized as the JSON request body.
    body_params = None
    if 'product_l0_instrument_file_dto' in local_var_params:
        body_params = local_var_params['product_l0_instrument_file_dto']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['access-token']  # noqa: E501

    # Delegate the actual HTTP round-trip to the shared ApiClient.
    return self.api_client.call_api(
        '/products/l0-dist/{productId}/instrument-files', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ProductL0InstrumentFile',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def products_l0_dist_controller_delete(self, product_id, **kwargs):  # noqa: E501
    """products_l0_dist_controller_delete  # noqa: E501

    Convenience wrapper that returns only the deserialized response body.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.products_l0_dist_controller_delete(product_id, async_req=True)
    >>> result = thread.get()

    :param product_id: (required)
    :type product_id: float
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: None
    """
    # Force body-only responses, overriding any caller-supplied value.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.products_l0_dist_controller_delete_with_http_info(
        product_id, **forwarded)  # noqa: E501
def products_l0_dist_controller_delete_with_http_info(self, product_id, **kwargs):  # noqa: E501
    """products_l0_dist_controller_delete  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.products_l0_dist_controller_delete_with_http_info(product_id, async_req=True)
    >>> result = thread.get()

    :param product_id: (required)
    :type product_id: float
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: None
    """
    # Capture the call arguments first, before any other local is bound.
    local_var_params = locals()
    all_params = [
        'product_id'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )
    # Reject keyword arguments not declared in the generated API spec.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method products_l0_dist_controller_delete" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'product_id' is set
    if self.api_client.client_side_validation and ('product_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['product_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `product_id` when calling `products_l0_dist_controller_delete`")  # noqa: E501

    collection_formats = {}

    # product_id is substituted into the {productId} path template.
    path_params = {}
    if 'product_id' in local_var_params:
        path_params['productId'] = local_var_params['product_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # DELETE carries no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['access-token']  # noqa: E501

    # Delegate the actual HTTP round-trip to the shared ApiClient.
    return self.api_client.call_api(
        '/products/l0-dist/{productId}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def products_l0_dist_controller_delete_instrument(self, product_id, instrument_id, **kwargs):  # noqa: E501
    """products_l0_dist_controller_delete_instrument  # noqa: E501

    Convenience wrapper that returns only the deserialized response body.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.products_l0_dist_controller_delete_instrument(product_id, instrument_id, async_req=True)
    >>> result = thread.get()

    :param product_id: (required)
    :type product_id: float
    :param instrument_id: (required)
    :type instrument_id: float
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: None
    """
    # Force body-only responses, overriding any caller-supplied value.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.products_l0_dist_controller_delete_instrument_with_http_info(
        product_id, instrument_id, **forwarded)  # noqa: E501
def products_l0_dist_controller_delete_instrument_with_http_info(self, product_id, instrument_id, **kwargs):  # noqa: E501
    """products_l0_dist_controller_delete_instrument  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.products_l0_dist_controller_delete_instrument_with_http_info(product_id, instrument_id, async_req=True)
    >>> result = thread.get()

    :param product_id: (required)
    :type product_id: float
    :param instrument_id: (required)
    :type instrument_id: float
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: None
    """
    # Capture the call arguments first, before any other local is bound.
    local_var_params = locals()
    all_params = [
        'product_id',
        'instrument_id'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )
    # Reject keyword arguments not declared in the generated API spec.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method products_l0_dist_controller_delete_instrument" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'product_id' is set
    if self.api_client.client_side_validation and ('product_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['product_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `product_id` when calling `products_l0_dist_controller_delete_instrument`")  # noqa: E501
    # verify the required parameter 'instrument_id' is set
    if self.api_client.client_side_validation and ('instrument_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['instrument_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `instrument_id` when calling `products_l0_dist_controller_delete_instrument`")  # noqa: E501

    collection_formats = {}

    # Both ids are substituted into the {productId}/{instrumentId} path template.
    path_params = {}
    if 'product_id' in local_var_params:
        path_params['productId'] = local_var_params['product_id']  # noqa: E501
    if 'instrument_id' in local_var_params:
        path_params['instrumentId'] = local_var_params['instrument_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # DELETE carries no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['access-token']  # noqa: E501

    # Delegate the actual HTTP round-trip to the shared ApiClient.
    return self.api_client.call_api(
        '/products/l0-dist/{productId}/instrument-files/{instrumentId}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def products_l0_dist_controller_find_all(self, **kwargs):  # noqa: E501
    """products_l0_dist_controller_find_all  # noqa: E501

    Convenience wrapper that returns only the deserialized response body.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.products_l0_dist_controller_find_all(async_req=True)
    >>> result = thread.get()

    :param filter_by_product_src_id:
    :type filter_by_product_src_id: float
    :param snapshot_date_time:
    :type snapshot_date_time: date
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: list[ProductL0Dist]
    """
    # Force body-only responses, overriding any caller-supplied value.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.products_l0_dist_controller_find_all_with_http_info(**forwarded)  # noqa: E501
def products_l0_dist_controller_find_all_with_http_info(self, **kwargs):  # noqa: E501
    """products_l0_dist_controller_find_all  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.products_l0_dist_controller_find_all_with_http_info(async_req=True)
    >>> result = thread.get()

    :param filter_by_product_src_id:
    :type filter_by_product_src_id: float
    :param snapshot_date_time:
    :type snapshot_date_time: date
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(list[ProductL0Dist], status_code(int), headers(HTTPHeaderDict))
    """
    # Capture the call arguments first, before any other local is bound.
    local_var_params = locals()
    all_params = [
        'filter_by_product_src_id',
        'snapshot_date_time'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )
    # Reject keyword arguments not declared in the generated API spec.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method products_l0_dist_controller_find_all" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    # Both filters are optional; only non-None values become query parameters.
    query_params = []
    if 'filter_by_product_src_id' in local_var_params and local_var_params['filter_by_product_src_id'] is not None:  # noqa: E501
        query_params.append(('filterByProductSrcId', local_var_params['filter_by_product_src_id']))  # noqa: E501
    if 'snapshot_date_time' in local_var_params and local_var_params['snapshot_date_time'] is not None:  # noqa: E501
        query_params.append(('snapshotDateTime', local_var_params['snapshot_date_time']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET carries no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['access-token']  # noqa: E501

    # Delegate the actual HTTP round-trip to the shared ApiClient.
    return self.api_client.call_api(
        '/products/l0-dist', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[ProductL0Dist]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def products_l0_dist_controller_find_instruments(self, product_id, **kwargs):  # noqa: E501
    """products_l0_dist_controller_find_instruments  # noqa: E501

    Convenience wrapper that returns only the deserialized response body.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.products_l0_dist_controller_find_instruments(product_id, async_req=True)
    >>> result = thread.get()

    :param product_id: (required)
    :type product_id: float
    :param snapshot_date_time:
    :type snapshot_date_time: date
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: list[ProductL0InstrumentFile]
    """
    # Force body-only responses, overriding any caller-supplied value.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.products_l0_dist_controller_find_instruments_with_http_info(
        product_id, **forwarded)  # noqa: E501
def products_l0_dist_controller_find_instruments_with_http_info(self, product_id, **kwargs):  # noqa: E501
    """products_l0_dist_controller_find_instruments  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.products_l0_dist_controller_find_instruments_with_http_info(product_id, async_req=True)
    >>> result = thread.get()

    :param product_id: (required)
    :type product_id: float
    :param snapshot_date_time:
    :type snapshot_date_time: date
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(list[ProductL0InstrumentFile], status_code(int), headers(HTTPHeaderDict))
    """
    # Capture the call arguments first, before any other local is bound.
    local_var_params = locals()
    all_params = [
        'product_id',
        'snapshot_date_time'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )
    # Reject keyword arguments not declared in the generated API spec.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method products_l0_dist_controller_find_instruments" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'product_id' is set
    if self.api_client.client_side_validation and ('product_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['product_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `product_id` when calling `products_l0_dist_controller_find_instruments`")  # noqa: E501

    collection_formats = {}

    # product_id is substituted into the {productId} path template.
    path_params = {}
    if 'product_id' in local_var_params:
        path_params['productId'] = local_var_params['product_id']  # noqa: E501

    # snapshot_date_time is optional; only a non-None value becomes a query parameter.
    query_params = []
    if 'snapshot_date_time' in local_var_params and local_var_params['snapshot_date_time'] is not None:  # noqa: E501
        query_params.append(('snapshotDateTime', local_var_params['snapshot_date_time']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET carries no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['access-token']  # noqa: E501

    # Delegate the actual HTTP round-trip to the shared ApiClient.
    return self.api_client.call_api(
        '/products/l0-dist/{productId}/instrument-files', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[ProductL0InstrumentFile]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def products_l0_dist_controller_find_one(self, product_id, **kwargs):  # noqa: E501
    """products_l0_dist_controller_find_one  # noqa: E501

    Fetch a single L0-dist product by id.  The call is synchronous by
    default; pass ``async_req=True`` to receive a request thread instead.

    >>> thread = api.products_l0_dist_controller_find_one(product_id, async_req=True)
    >>> result = thread.get()

    :param product_id: (required)
    :type product_id: float
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: ProductL0Dist
    """
    # Delegate to the *_with_http_info variant, forcing it to hand back
    # only the deserialized payload instead of (data, status, headers).
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.products_l0_dist_controller_find_one_with_http_info(product_id, **forwarded)  # noqa: E501
def products_l0_dist_controller_find_one_with_http_info(self, product_id, **kwargs):  # noqa: E501
    """products_l0_dist_controller_find_one  # noqa: E501

    GET ``/products/l0-dist/{productId}`` and return the full HTTP response
    triple (data, status code, headers) unless ``_return_http_data_only``
    is set.  This method makes a synchronous HTTP request by default. To
    make an asynchronous HTTP request, please pass async_req=True

    >>> thread = api.products_l0_dist_controller_find_one_with_http_info(product_id, async_req=True)
    >>> result = thread.get()

    :param product_id: (required)
    :type product_id: float
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(ProductL0Dist, status_code(int), headers(HTTPHeaderDict))
    """
    # NOTE: locals() must be captured before any other local variable is
    # defined — it snapshots {self, product_id, kwargs} for the template below.
    local_var_params = locals()

    # Names accepted by this endpoint (positional args plus the generic
    # per-request options every generated method supports).
    all_params = [
        'product_id'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones
    # into local_var_params so they can be looked up uniformly.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method products_l0_dist_controller_find_one" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'product_id' is set
    if self.api_client.client_side_validation and ('product_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['product_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `product_id` when calling `products_l0_dist_controller_find_one`")  # noqa: E501

    collection_formats = {}

    # Path parameters: snake_case locals map to the camelCase names used
    # in the URL template.
    path_params = {}
    if 'product_id' in local_var_params:
        path_params['productId'] = local_var_params['product_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['access-token']  # noqa: E501

    return self.api_client.call_api(
        '/products/l0-dist/{productId}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ProductL0Dist',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def products_l0_dist_controller_find_one_instrument(self, product_id, instrument_id, **kwargs):  # noqa: E501
    """products_l0_dist_controller_find_one_instrument  # noqa: E501

    Fetch one instrument file of an L0-dist product.  Synchronous by
    default; pass ``async_req=True`` to get a request thread instead.

    >>> thread = api.products_l0_dist_controller_find_one_instrument(product_id, instrument_id, async_req=True)
    >>> result = thread.get()

    :param product_id: (required)
    :type product_id: float
    :param instrument_id: (required)
    :type instrument_id: float
    :param snapshot_date_time:
    :type snapshot_date_time: date
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: ProductL0InstrumentFile
    """
    # Delegate to the *_with_http_info variant, forcing it to hand back
    # only the deserialized payload instead of (data, status, headers).
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.products_l0_dist_controller_find_one_instrument_with_http_info(product_id, instrument_id, **forwarded)  # noqa: E501
def products_l0_dist_controller_find_one_instrument_with_http_info(self, product_id, instrument_id, **kwargs):  # noqa: E501
    """products_l0_dist_controller_find_one_instrument  # noqa: E501

    GET ``/products/l0-dist/{productId}/instrument-files/{instrumentId}``
    and return the full HTTP response triple (data, status code, headers)
    unless ``_return_http_data_only`` is set.  This method makes a
    synchronous HTTP request by default. To make an asynchronous HTTP
    request, please pass async_req=True

    >>> thread = api.products_l0_dist_controller_find_one_instrument_with_http_info(product_id, instrument_id, async_req=True)
    >>> result = thread.get()

    :param product_id: (required)
    :type product_id: float
    :param instrument_id: (required)
    :type instrument_id: float
    :param snapshot_date_time:
    :type snapshot_date_time: date
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(ProductL0InstrumentFile, status_code(int), headers(HTTPHeaderDict))
    """
    # NOTE: locals() must be captured before any other local variable is
    # defined — it snapshots the positional args plus kwargs.
    local_var_params = locals()

    # Names accepted by this endpoint (positional args, optional query
    # parameter, plus the generic per-request options).
    all_params = [
        'product_id',
        'instrument_id',
        'snapshot_date_time'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones
    # into local_var_params so they can be looked up uniformly.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method products_l0_dist_controller_find_one_instrument" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'product_id' is set
    if self.api_client.client_side_validation and ('product_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['product_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `product_id` when calling `products_l0_dist_controller_find_one_instrument`")  # noqa: E501
    # verify the required parameter 'instrument_id' is set
    if self.api_client.client_side_validation and ('instrument_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['instrument_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `instrument_id` when calling `products_l0_dist_controller_find_one_instrument`")  # noqa: E501

    collection_formats = {}

    # Path parameters: snake_case locals map to the camelCase names used
    # in the URL template.
    path_params = {}
    if 'product_id' in local_var_params:
        path_params['productId'] = local_var_params['product_id']  # noqa: E501
    if 'instrument_id' in local_var_params:
        path_params['instrumentId'] = local_var_params['instrument_id']  # noqa: E501

    # Optional query parameter, only sent when explicitly provided.
    query_params = []
    if 'snapshot_date_time' in local_var_params and local_var_params['snapshot_date_time'] is not None:  # noqa: E501
        query_params.append(('snapshotDateTime', local_var_params['snapshot_date_time']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['access-token']  # noqa: E501

    return self.api_client.call_api(
        '/products/l0-dist/{productId}/instrument-files/{instrumentId}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ProductL0InstrumentFile',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def products_l0_dist_controller_update(self, product_id, product_l0_dist_dto, **kwargs):  # noqa: E501
    """products_l0_dist_controller_update  # noqa: E501

    Update an L0-dist product.  Synchronous by default; pass
    ``async_req=True`` to get a request thread instead.

    >>> thread = api.products_l0_dist_controller_update(product_id, product_l0_dist_dto, async_req=True)
    >>> result = thread.get()

    :param product_id: (required)
    :type product_id: float
    :param product_l0_dist_dto: (required)
    :type product_l0_dist_dto: ProductL0DistDto
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: None
    """
    # Delegate to the *_with_http_info variant, forcing it to hand back
    # only the deserialized payload instead of (data, status, headers).
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.products_l0_dist_controller_update_with_http_info(product_id, product_l0_dist_dto, **forwarded)  # noqa: E501
def products_l0_dist_controller_update_with_http_info(self, product_id, product_l0_dist_dto, **kwargs):  # noqa: E501
    """products_l0_dist_controller_update  # noqa: E501

    PUT ``/products/l0-dist/{productId}`` with a JSON body and return the
    full HTTP response triple (data, status code, headers) unless
    ``_return_http_data_only`` is set.  This method makes a synchronous
    HTTP request by default. To make an asynchronous HTTP request, please
    pass async_req=True

    >>> thread = api.products_l0_dist_controller_update_with_http_info(product_id, product_l0_dist_dto, async_req=True)
    >>> result = thread.get()

    :param product_id: (required)
    :type product_id: float
    :param product_l0_dist_dto: (required)
    :type product_l0_dist_dto: ProductL0DistDto
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: None
    """
    # NOTE: locals() must be captured before any other local variable is
    # defined — it snapshots the positional args plus kwargs.
    local_var_params = locals()

    # Names accepted by this endpoint (positional args plus the generic
    # per-request options).
    all_params = [
        'product_id',
        'product_l0_dist_dto'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones
    # into local_var_params so they can be looked up uniformly.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method products_l0_dist_controller_update" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'product_id' is set
    if self.api_client.client_side_validation and ('product_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['product_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `product_id` when calling `products_l0_dist_controller_update`")  # noqa: E501
    # verify the required parameter 'product_l0_dist_dto' is set
    if self.api_client.client_side_validation and ('product_l0_dist_dto' not in local_var_params or  # noqa: E501
                                                   local_var_params['product_l0_dist_dto'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `product_l0_dist_dto` when calling `products_l0_dist_controller_update`")  # noqa: E501

    collection_formats = {}

    # Path parameters: snake_case locals map to the camelCase names used
    # in the URL template.
    path_params = {}
    if 'product_id' in local_var_params:
        path_params['productId'] = local_var_params['product_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The DTO is serialized as the JSON request body.
    body_params = None
    if 'product_l0_dist_dto' in local_var_params:
        body_params = local_var_params['product_l0_dist_dto']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['access-token']  # noqa: E501

    return self.api_client.call_api(
        '/products/l0-dist/{productId}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def products_l0_dist_controller_update_instrument(self, product_id, instrument_id, product_l0_instrument_file_dto, **kwargs):  # noqa: E501
    """products_l0_dist_controller_update_instrument  # noqa: E501

    Update one instrument file of an L0-dist product.  Synchronous by
    default; pass ``async_req=True`` to get a request thread instead.

    >>> thread = api.products_l0_dist_controller_update_instrument(product_id, instrument_id, product_l0_instrument_file_dto, async_req=True)
    >>> result = thread.get()

    :param product_id: (required)
    :type product_id: float
    :param instrument_id: (required)
    :type instrument_id: float
    :param product_l0_instrument_file_dto: (required)
    :type product_l0_instrument_file_dto: ProductL0InstrumentFileDto
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: None
    """
    # Delegate to the *_with_http_info variant, forcing it to hand back
    # only the deserialized payload instead of (data, status, headers).
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.products_l0_dist_controller_update_instrument_with_http_info(product_id, instrument_id, product_l0_instrument_file_dto, **forwarded)  # noqa: E501
def products_l0_dist_controller_update_instrument_with_http_info(self, product_id, instrument_id, product_l0_instrument_file_dto, **kwargs):  # noqa: E501
    """products_l0_dist_controller_update_instrument  # noqa: E501

    PUT ``/products/l0-dist/{productId}/instrument-files/{instrumentId}``
    with a JSON body and return the full HTTP response triple (data,
    status code, headers) unless ``_return_http_data_only`` is set.  This
    method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.products_l0_dist_controller_update_instrument_with_http_info(product_id, instrument_id, product_l0_instrument_file_dto, async_req=True)
    >>> result = thread.get()

    :param product_id: (required)
    :type product_id: float
    :param instrument_id: (required)
    :type instrument_id: float
    :param product_l0_instrument_file_dto: (required)
    :type product_l0_instrument_file_dto: ProductL0InstrumentFileDto
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: None
    """
    # NOTE: locals() must be captured before any other local variable is
    # defined — it snapshots the positional args plus kwargs.
    local_var_params = locals()

    # Names accepted by this endpoint (positional args plus the generic
    # per-request options).
    all_params = [
        'product_id',
        'instrument_id',
        'product_l0_instrument_file_dto'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones
    # into local_var_params so they can be looked up uniformly.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method products_l0_dist_controller_update_instrument" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'product_id' is set
    if self.api_client.client_side_validation and ('product_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['product_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `product_id` when calling `products_l0_dist_controller_update_instrument`")  # noqa: E501
    # verify the required parameter 'instrument_id' is set
    if self.api_client.client_side_validation and ('instrument_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['instrument_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `instrument_id` when calling `products_l0_dist_controller_update_instrument`")  # noqa: E501
    # verify the required parameter 'product_l0_instrument_file_dto' is set
    if self.api_client.client_side_validation and ('product_l0_instrument_file_dto' not in local_var_params or  # noqa: E501
                                                   local_var_params['product_l0_instrument_file_dto'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `product_l0_instrument_file_dto` when calling `products_l0_dist_controller_update_instrument`")  # noqa: E501

    collection_formats = {}

    # Path parameters: snake_case locals map to the camelCase names used
    # in the URL template.
    path_params = {}
    if 'product_id' in local_var_params:
        path_params['productId'] = local_var_params['product_id']  # noqa: E501
    if 'instrument_id' in local_var_params:
        path_params['instrumentId'] = local_var_params['instrument_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The DTO is serialized as the JSON request body.
    body_params = None
    if 'product_l0_instrument_file_dto' in local_var_params:
        body_params = local_var_params['product_l0_instrument_file_dto']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['access-token']  # noqa: E501

    return self.api_client.call_api(
        '/products/l0-dist/{productId}/instrument-files/{instrumentId}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
| 47.375261
| 173
| 0.617106
| 7,677
| 68,173
| 5.148756
| 0.027875
| 0.037848
| 0.059149
| 0.058289
| 0.98434
| 0.98396
| 0.980697
| 0.97675
| 0.974144
| 0.969009
| 0
| 0.016081
| 0.314949
| 68,173
| 1,438
| 174
| 47.408206
| 0.830286
| 0.457322
| 0
| 0.777424
| 0
| 0
| 0.220123
| 0.083025
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033386
| false
| 0
| 0.007949
| 0
| 0.074722
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c395ddc05b4930ecb95705f5ff4097832aa237e0
| 10,401
|
py
|
Python
|
expert/utils/filters.py
|
alexhepburn/expert
|
546f7452ced2213ef91e5ce6e7456a1668dd9f95
|
[
"BSD-3-Clause"
] | 1
|
2021-04-10T11:34:22.000Z
|
2021-04-10T11:34:22.000Z
|
expert/utils/filters.py
|
alexhepburn/expert
|
546f7452ced2213ef91e5ce6e7456a1668dd9f95
|
[
"BSD-3-Clause"
] | null | null | null |
expert/utils/filters.py
|
alexhepburn/expert
|
546f7452ced2213ef91e5ce6e7456a1668dd9f95
|
[
"BSD-3-Clause"
] | null | null | null |
"""
The :mod:`expert.utils.filters` module holds filters used in the
:mod:`exeprt.models.pyramids` module.
"""
# Author: Alex Hepburn <alex.hepburn@bristol.ac.uk>
# License: new BSD
import numpy as np
import torch
# Pre-computed spatial-domain filter taps for a steerable pyramid
# decomposition, stored as torch tensors shaped for conv2d
# (out_channels, in_channels, kH, kW).  Values are hard-coded float32
# constants; the symmetric layout of each kernel is visible in the rows
# below (each matrix is symmetric about its centre).
STEERABLE_SPATIAL_FILTERS = {
    # 'lo0filt': 9x9 kernel applied once at the pyramid's first level.
    'lo0filt': torch.from_numpy(np.array([
        [-8.701000e-05, -1.354280e-03, -1.601260e-03, -5.033700e-04,
         2.524010e-03, -5.033700e-04, -1.601260e-03, -1.354280e-03,
         -8.701000e-05],
        [-1.354280e-03, 2.921580e-03, 7.522720e-03, 8.224420e-03, 1.107620e-03,
         8.224420e-03, 7.522720e-03, 2.921580e-03, -1.354280e-03],
        [-1.601260e-03, 7.522720e-03, -7.061290e-03, -3.769487e-02,
         -3.297137e-02, -3.769487e-02, -7.061290e-03, 7.522720e-03,
         -1.601260e-03],
        [-5.033700e-04, 8.224420e-03, -3.769487e-02, 4.381320e-02, 1.811603e-01,
         4.381320e-02, -3.769487e-02, 8.224420e-03, -5.033700e-04],
        [2.524010e-03, 1.107620e-03, -3.297137e-02, 1.811603e-01, 4.376250e-01,
         1.811603e-01, -3.297137e-02, 1.107620e-03, 2.524010e-03],
        [-5.033700e-04, 8.224420e-03, -3.769487e-02, 4.381320e-02, 1.811603e-01,
         4.381320e-02, -3.769487e-02, 8.224420e-03, -5.033700e-04],
        [-1.601260e-03, 7.522720e-03, -7.061290e-03, -3.769487e-02,
         -3.297137e-02, -3.769487e-02, -7.061290e-03, 7.522720e-03,
         -1.601260e-03],
        [-1.354280e-03, 2.921580e-03, 7.522720e-03, 8.224420e-03, 1.107620e-03,
         8.224420e-03, 7.522720e-03, 2.921580e-03, -1.354280e-03],
        [-8.701000e-05, -1.354280e-03, -1.601260e-03, -5.033700e-04,
         2.524010e-03, -5.033700e-04, -1.601260e-03, -1.354280e-03,
         -8.701000e-05]], dtype=np.float32)).reshape((1, 1, 9, 9)),
    # 'hi0filt': 9x9 kernel (complement of lo0filt), applied once at the
    # first level.
    'hi0filt': torch.from_numpy(np.array([
        [-9.570000e-04, -2.424100e-04, -1.424720e-03, -8.742600e-04,
         -1.166810e-03, -8.742600e-04, -1.424720e-03, -2.424100e-04,
         -9.570000e-04],
        [-2.424100e-04, -4.317530e-03, 8.998600e-04, 9.156420e-03, 1.098012e-02,
         9.156420e-03, 8.998600e-04, -4.317530e-03, -2.424100e-04],
        [-1.424720e-03, 8.998600e-04, 1.706347e-02, 1.094866e-02, -5.897780e-03,
         1.094866e-02, 1.706347e-02, 8.998600e-04, -1.424720e-03],
        [-8.742600e-04, 9.156420e-03, 1.094866e-02, -7.841370e-02,
         -1.562827e-01, -7.841370e-02, 1.094866e-02, 9.156420e-03,
         -8.742600e-04],
        [-1.166810e-03, 1.098012e-02, -5.897780e-03, -1.562827e-01,
         7.282593e-01, -1.562827e-01, -5.897780e-03, 1.098012e-02,
         -1.166810e-03],
        [-8.742600e-04, 9.156420e-03, 1.094866e-02, -7.841370e-02,
         -1.562827e-01, -7.841370e-02, 1.094866e-02, 9.156420e-03,
         -8.742600e-04],
        [-1.424720e-03, 8.998600e-04, 1.706347e-02, 1.094866e-02, -5.897780e-03,
         1.094866e-02, 1.706347e-02, 8.998600e-04, -1.424720e-03],
        [-2.424100e-04, -4.317530e-03, 8.998600e-04, 9.156420e-03, 1.098012e-02,
         9.156420e-03, 8.998600e-04, -4.317530e-03, -2.424100e-04],
        [-9.570000e-04, -2.424100e-04, -1.424720e-03, -8.742600e-04,
         -1.166810e-03, -8.742600e-04, -1.424720e-03, -2.424100e-04,
         -9.570000e-04]], dtype=np.float32)).reshape((1, 1, 9, 9)),
    # 'lofilt': 17x17 kernel used at every subsequent pyramid level
    # (before downsampling).
    'lofilt': torch.from_numpy(np.array([
        [-4.350000e-05, 1.207800e-04, -6.771400e-04, -1.243400e-04, -8.006400e-04,
         -1.597040e-03, -2.516800e-04, -4.202000e-04, 1.262000e-03, -4.202000e-04,
         -2.516800e-04, -1.597040e-03, -8.006400e-04, -1.243400e-04, -6.771400e-04,
         1.207800e-04, -4.350000e-05],
        [1.207800e-04, 4.460600e-04, -5.814600e-04, 5.621600e-04, -1.368800e-04,
         2.325540e-03, 2.889860e-03, 4.287280e-03, 5.589400e-03, 4.287280e-03,
         2.889860e-03, 2.325540e-03, -1.368800e-04, 5.621600e-04, -5.814600e-04,
         4.460600e-04, 1.207800e-04],
        [-6.771400e-04, -5.814600e-04, 1.460780e-03, 2.160540e-03, 3.761360e-03,
         3.080980e-03, 4.112200e-03, 2.221220e-03, 5.538200e-04, 2.221220e-03,
         4.112200e-03, 3.080980e-03, 3.761360e-03, 2.160540e-03, 1.460780e-03,
         -5.814600e-04, -6.771400e-04],
        [-1.243400e-04, 5.621600e-04, 2.160540e-03, 3.175780e-03, 3.184680e-03,
         -1.777480e-03, -7.431700e-03, -9.056920e-03, -9.637220e-03, -9.056920e-03,
         -7.431700e-03, -1.777480e-03, 3.184680e-03, 3.175780e-03, 2.160540e-03,
         5.621600e-04, -1.243400e-04],
        [-8.006400e-04, -1.368800e-04, 3.761360e-03, 3.184680e-03, -3.530640e-03,
         -1.260420e-02, -1.884744e-02, -1.750818e-02, -1.648568e-02, -1.750818e-02,
         -1.884744e-02, -1.260420e-02, -3.530640e-03, 3.184680e-03, 3.761360e-03,
         -1.368800e-04, -8.006400e-04],
        [-1.597040e-03, 2.325540e-03, 3.080980e-03, -1.777480e-03, -1.260420e-02,
         -2.022938e-02, -1.109170e-02, 3.955660e-03, 1.438512e-02, 3.955660e-03,
         -1.109170e-02, -2.022938e-02, -1.260420e-02, -1.777480e-03, 3.080980e-03,
         2.325540e-03, -1.597040e-03],
        [-2.516800e-04, 2.889860e-03, 4.112200e-03, -7.431700e-03, -1.884744e-02,
         -1.109170e-02, 2.190660e-02, 6.806584e-02, 9.058014e-02, 6.806584e-02,
         2.190660e-02, -1.109170e-02, -1.884744e-02, -7.431700e-03, 4.112200e-03,
         2.889860e-03, -2.516800e-04],
        [-4.202000e-04, 4.287280e-03, 2.221220e-03, -9.056920e-03, -1.750818e-02,
         3.955660e-03, 6.806584e-02, 1.445500e-01, 1.773651e-01, 1.445500e-01,
         6.806584e-02, 3.955660e-03, -1.750818e-02, -9.056920e-03, 2.221220e-03,
         4.287280e-03, -4.202000e-04],
        [1.262000e-03, 5.589400e-03, 5.538200e-04, -9.637220e-03, -1.648568e-02,
         1.438512e-02, 9.058014e-02, 1.773651e-01, 2.120374e-01, 1.773651e-01,
         9.058014e-02, 1.438512e-02, -1.648568e-02, -9.637220e-03, 5.538200e-04,
         5.589400e-03, 1.262000e-03],
        [-4.202000e-04, 4.287280e-03, 2.221220e-03, -9.056920e-03, -1.750818e-02,
         3.955660e-03, 6.806584e-02, 1.445500e-01, 1.773651e-01, 1.445500e-01,
         6.806584e-02, 3.955660e-03, -1.750818e-02, -9.056920e-03, 2.221220e-03,
         4.287280e-03, -4.202000e-04],
        [-2.516800e-04, 2.889860e-03, 4.112200e-03, -7.431700e-03, -1.884744e-02,
         -1.109170e-02, 2.190660e-02, 6.806584e-02, 9.058014e-02, 6.806584e-02,
         2.190660e-02, -1.109170e-02, -1.884744e-02, -7.431700e-03, 4.112200e-03,
         2.889860e-03, -2.516800e-04],
        [-1.597040e-03, 2.325540e-03, 3.080980e-03, -1.777480e-03, -1.260420e-02,
         -2.022938e-02, -1.109170e-02, 3.955660e-03, 1.438512e-02, 3.955660e-03,
         -1.109170e-02, -2.022938e-02, -1.260420e-02, -1.777480e-03, 3.080980e-03,
         2.325540e-03, -1.597040e-03],
        [-8.006400e-04, -1.368800e-04, 3.761360e-03, 3.184680e-03, -3.530640e-03,
         -1.260420e-02, -1.884744e-02, -1.750818e-02, -1.648568e-02, -1.750818e-02,
         -1.884744e-02, -1.260420e-02, -3.530640e-03, 3.184680e-03, 3.761360e-03,
         -1.368800e-04, -8.006400e-04],
        [-1.243400e-04, 5.621600e-04, 2.160540e-03, 3.175780e-03, 3.184680e-03,
         -1.777480e-03, -7.431700e-03, -9.056920e-03, -9.637220e-03, -9.056920e-03,
         -7.431700e-03, -1.777480e-03, 3.184680e-03, 3.175780e-03, 2.160540e-03,
         5.621600e-04, -1.243400e-04],
        [-6.771400e-04, -5.814600e-04, 1.460780e-03, 2.160540e-03, 3.761360e-03,
         3.080980e-03, 4.112200e-03, 2.221220e-03, 5.538200e-04, 2.221220e-03,
         4.112200e-03, 3.080980e-03, 3.761360e-03, 2.160540e-03, 1.460780e-03,
         -5.814600e-04, -6.771400e-04],
        [1.207800e-04, 4.460600e-04, -5.814600e-04, 5.621600e-04, -1.368800e-04,
         2.325540e-03, 2.889860e-03, 4.287280e-03, 5.589400e-03, 4.287280e-03,
         2.889860e-03, 2.325540e-03, -1.368800e-04, 5.621600e-04, -5.814600e-04,
         4.460600e-04, 1.207800e-04],
        [-4.350000e-05, 1.207800e-04, -6.771400e-04, -1.243400e-04, -8.006400e-04,
         -1.597040e-03, -2.516800e-04, -4.202000e-04, 1.262000e-03, -4.202000e-04,
         -2.516800e-04, -1.597040e-03, -8.006400e-04, -1.243400e-04, -6.771400e-04,
         1.207800e-04, -4.350000e-05]], dtype=np.float32)).reshape((1, 1, 17, 17)),
    # 'bfilts': two 9x9 oriented band-pass kernels stacked on the output
    # channel axis; the second appears to be a transposed/negated variant
    # of the first (two orientations) — confirm against the pyramid code.
    'bfilts': torch.from_numpy(np.array([
        [[6.125880e-03, -8.052600e-03, -2.103714e-02, -1.536890e-02, -1.851466e-02,
          -1.536890e-02, -2.103714e-02, -8.052600e-03, 6.125880e-03],
         [-1.287416e-02, -9.611520e-03, 1.023569e-02, 6.009450e-03, 1.872620e-03,
          6.009450e-03, 1.023569e-02, -9.611520e-03, -1.287416e-02],
         [-5.641530e-03, 4.168400e-03, -2.382180e-02, -5.375324e-02, -2.076086e-02,
          -5.375324e-02, -2.382180e-02, 4.168400e-03, -5.641530e-03],
         [-8.957260e-03, -1.751170e-03, -1.836909e-02, 1.265655e-01, 2.996168e-01,
          1.265655e-01, -1.836909e-02, -1.751170e-03, -8.957260e-03],
         [0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
          0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00],
         [8.957260e-03, 1.751170e-03, 1.836909e-02, -1.265655e-01, -2.996168e-01,
          -1.265655e-01, 1.836909e-02, 1.751170e-03, 8.957260e-03],
         [5.641530e-03, -4.168400e-03, 2.382180e-02, 5.375324e-02, 2.076086e-02,
          5.375324e-02, 2.382180e-02, -4.168400e-03, 5.641530e-03],
         [1.287416e-02, 9.611520e-03, -1.023569e-02, -6.009450e-03, -1.872620e-03,
          -6.009450e-03, -1.023569e-02, 9.611520e-03, 1.287416e-02],
         [-6.125880e-03, 8.052600e-03, 2.103714e-02, 1.536890e-02, 1.851466e-02,
          1.536890e-02, 2.103714e-02, 8.052600e-03, -6.125880e-03]],
        [[-6.125880e-03, 1.287416e-02, 5.641530e-03, 8.957260e-03, 0.000000e+00,
          -8.957260e-03, -5.641530e-03, -1.287416e-02, 6.125880e-03],
         [8.052600e-03, 9.611520e-03, -4.168400e-03, 1.751170e-03, 0.000000e+00,
          -1.751170e-03, 4.168400e-03, -9.611520e-03, -8.052600e-03],
         [2.103714e-02, -1.023569e-02, 2.382180e-02, 1.836909e-02, 0.000000e+00,
          -1.836909e-02, -2.382180e-02, 1.023569e-02, -2.103714e-02],
         [1.536890e-02, -6.009450e-03, 5.375324e-02, -1.265655e-01, 0.000000e+00,
          1.265655e-01, -5.375324e-02, 6.009450e-03, -1.536890e-02],
         [1.851466e-02, -1.872620e-03, 2.076086e-02, -2.996168e-01, 0.000000e+00,
          2.996168e-01, -2.076086e-02, 1.872620e-03, -1.851466e-02],
         [1.536890e-02, -6.009450e-03, 5.375324e-02, -1.265655e-01, 0.000000e+00,
          1.265655e-01, -5.375324e-02, 6.009450e-03, -1.536890e-02],
         [2.103714e-02, -1.023569e-02, 2.382180e-02, 1.836909e-02, 0.000000e+00,
          -1.836909e-02, -2.382180e-02, 1.023569e-02, -2.103714e-02],
         [8.052600e-03, 9.611520e-03, -4.168400e-03, 1.751170e-03, 0.000000e+00,
          -1.751170e-03, 4.168400e-03, -9.611520e-03, -8.052600e-03],
         [-6.125880e-03, 1.287416e-02, 5.641530e-03, 8.957260e-03, 0.000000e+00,
          -8.957260e-03, -5.641530e-03, -1.287416e-02,
          6.125880e-03]]], dtype=np.float32)).reshape((2, 1, 9, 9))
}
| 61.910714
| 80
| 0.627728
| 1,931
| 10,401
| 3.378042
| 0.07768
| 0.034493
| 0.027595
| 0.014717
| 0.91277
| 0.895907
| 0.889161
| 0.881036
| 0.873371
| 0.873371
| 0
| 0.622964
| 0.144217
| 10,401
| 167
| 81
| 62.281437
| 0.109875
| 0.016345
| 0
| 0.696203
| 0
| 0
| 0.002544
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.012658
| 0
| 0.012658
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c39f99cdd35db74da46acfe289cd3df43c93d6c1
| 11,350
|
py
|
Python
|
tests/backends/test_dbscript.py
|
MattToast/SmartSim
|
4bd5e231445abd9b888561930db859062708678a
|
[
"BSD-2-Clause"
] | null | null | null |
tests/backends/test_dbscript.py
|
MattToast/SmartSim
|
4bd5e231445abd9b888561930db859062708678a
|
[
"BSD-2-Clause"
] | null | null | null |
tests/backends/test_dbscript.py
|
MattToast/SmartSim
|
4bd5e231445abd9b888561930db859062708678a
|
[
"BSD-2-Clause"
] | null | null | null |
import sys
import pytest
from smartsim import Experiment, status
from smartsim._core.utils import installed_redisai_backends
from smartsim.error.errors import SSUnsupportedError
# Gate every test in this module on Torch availability: the torch Python
# package must import AND the local RedisAI build must expose the "torch"
# backend (both are required to run DB scripts/functions).
should_run = True

try:
    import torch
except ImportError:
    should_run = False

should_run &= "torch" in installed_redisai_backends()
def timestwo(x):
    """Double the input; used below as a function serialized via ``add_function``."""
    return x * 2
@pytest.mark.skipif(not should_run, reason="Test needs Torch to run")
def test_db_script(fileutils, wlmutils):
    """Run a model against a standalone DB with three DB scripts attached.

    Exercises all three attachment styles: a script loaded from a file, a
    script given as an in-memory string, and a serialized Python function.
    """
    work_dir = fileutils.make_test_dir()
    driver_script = fileutils.get_test_conf_path("run_dbscript_smartredis.py")
    script_file = fileutils.get_test_conf_path("torchscript.py")

    experiment = Experiment("test-db-script", exp_path=work_dir, launcher="local")

    # Model that will exercise the scripts through SmartRedis.
    settings = experiment.create_run_settings(exe=sys.executable, exe_args=driver_script)
    model = experiment.create_model("smartsim_model", settings)
    model.set_path(work_dir)

    orchestrator = experiment.create_database(port=wlmutils.get_test_port(), interface="lo")
    experiment.generate(orchestrator)

    script_as_string = "def negate(x):\n\treturn torch.neg(x)\n"
    model.add_script("test_script1", script_path=script_file, device="CPU")
    model.add_script("test_script2", script=script_as_string, device="CPU")
    model.add_function("test_func", function=timestwo, device="CPU")

    # All three attachment styles must be registered on the model.
    assert len(model._db_scripts) == 3

    experiment.start(orchestrator, model, block=True)
    run_statuses = experiment.get_status(model)
    experiment.stop(orchestrator)
    assert all(stat == status.STATUS_COMPLETED for stat in run_statuses)
@pytest.mark.skipif(not should_run, reason="Test needs Torch to run")
def test_db_script_ensemble(fileutils, wlmutils):
    """Test DB scripts on a remote DB, attached both ensemble-wide and per-entity."""
    exp_name = "test-db-script"

    # get test setup
    test_dir = fileutils.make_test_dir()
    sr_test_script = fileutils.get_test_conf_path("run_dbscript_smartredis.py")
    torch_script = fileutils.get_test_conf_path("torchscript.py")

    exp = Experiment(exp_name, exp_path=test_dir, launcher="local")

    # Build a two-replica ensemble plus a standalone model that joins it later.
    run_settings = exp.create_run_settings(exe=sys.executable, exe_args=sr_test_script)
    ensemble = exp.create_ensemble(
        "dbscript_ensemble", run_settings=run_settings, replicas=2
    )
    ensemble.set_path(test_dir)

    smartsim_model = exp.create_model("smartsim_model", run_settings)
    smartsim_model.set_path(test_dir)

    db = exp.create_database(port=wlmutils.get_test_port(), interface="lo")
    exp.generate(db)

    torch_script_str = "def negate(x):\n\treturn torch.neg(x)\n"

    # Script attached at the ensemble level.
    ensemble.add_script("test_script1", script_path=torch_script, device="CPU")

    # Script attached individually to each replica.
    for entity in ensemble:
        entity.disable_key_prefixing()
        entity.add_script("test_script2", script=torch_script_str, device="CPU")

    ensemble.add_function("test_func", function=timestwo, device="CPU")

    # Ensemble must add all available DBScripts to new entity
    ensemble.add_model(smartsim_model)
    # NOTE(review): the late-added model presumably inherits the two
    # ensemble-level attachments, so only the per-entity script is added here
    # to match the replicas — confirm against Ensemble.add_model.
    smartsim_model.add_script("test_script2", script=torch_script_str, device="CPU")

    # The ensemble itself holds the two ensemble-level attachments.
    assert len(ensemble._db_scripts) == 2
    # Every member (replicas and the added model) should end up with three.
    assert all([len(entity._db_scripts) == 3 for entity in ensemble])

    exp.start(db, ensemble, block=True)
    statuses = exp.get_status(ensemble)
    exp.stop(db)
    assert all([stat == status.STATUS_COMPLETED for stat in statuses])
@pytest.mark.skipif(not should_run, reason="Test needs Torch to run")
def test_colocated_db_script(fileutils, wlmutils):
    """Test DB Scripts on colocated DB.

    Attaches one script from a file and one from a string to a model with a
    colocated database, runs it, and checks the run completes.
    """
    exp_name = "test-colocated-db-script"
    exp = Experiment(exp_name, launcher="local")

    # get test setup
    test_dir = fileutils.make_test_dir()
    sr_test_script = fileutils.get_test_conf_path("run_dbscript_smartredis.py")
    torch_script = fileutils.get_test_conf_path("torchscript.py")

    # create colocated model
    colo_settings = exp.create_run_settings(exe=sys.executable, exe_args=sr_test_script)
    colo_model = exp.create_model("colocated_model", colo_settings)
    colo_model.set_path(test_dir)
    colo_model.colocate_db(
        port=wlmutils.get_test_port(),
        db_cpus=1,
        limit_app_cpus=False,
        debug=True,
        ifname="lo",
    )

    torch_script_str = "def negate(x):\n\treturn torch.neg(x)\n"
    colo_model.add_script("test_script1", script_path=torch_script, device="CPU")
    colo_model.add_script("test_script2", script=torch_script_str, device="CPU")

    # Both scripts must be registered on the model.
    # (A leftover debug loop printing each _db_script was removed here.)
    assert len(colo_model._db_scripts) == 2

    exp.start(colo_model, block=True)
    statuses = exp.get_status(colo_model)
    assert all(stat == status.STATUS_COMPLETED for stat in statuses)
@pytest.mark.skipif(not should_run, reason="Test needs Torch to run")
def test_colocated_db_script_ensemble(fileutils, wlmutils):
    """Test DB Scripts on colocated DB from ensemble, first colocating DB,
    then adding script.
    """
    exp_name = "test-colocated-db-script"
    exp = Experiment(exp_name, launcher="local")

    # get test setup
    test_dir = fileutils.make_test_dir()
    sr_test_script = fileutils.get_test_conf_path("run_dbscript_smartredis.py")
    torch_script = fileutils.get_test_conf_path("torchscript.py")

    # Two-replica ensemble plus a standalone model that joins it later.
    colo_settings = exp.create_run_settings(exe=sys.executable, exe_args=sr_test_script)
    colo_ensemble = exp.create_ensemble(
        "colocated_ensemble", run_settings=colo_settings, replicas=2
    )
    colo_ensemble.set_path(test_dir)

    colo_model = exp.create_model("colocated_model", colo_settings)
    colo_model.set_path(test_dir)

    # Colocate each replica first (distinct port per replica), THEN attach a script.
    for i, entity in enumerate(colo_ensemble):
        entity.disable_key_prefixing()
        entity.colocate_db(
            port=wlmutils.get_test_port() + i, db_cpus=1, limit_app_cpus=False, debug=True, ifname="lo"
        )
        entity.add_script("test_script1", script_path=torch_script, device="CPU")

    # The standalone model gets the next port after the replicas.
    colo_model.colocate_db(
        port=wlmutils.get_test_port() + len(colo_ensemble),
        db_cpus=1,
        limit_app_cpus=False,
        debug=True,
        ifname="lo",
    )

    torch_script_str = "def negate(x):\n\treturn torch.neg(x)\n"
    colo_ensemble.add_script("test_script2", script=torch_script_str, device="CPU")

    colo_ensemble.add_model(colo_model)
    # NOTE(review): the late-added model presumably inherits the ensemble-level
    # script, so only the per-entity one is added here — confirm against
    # Ensemble.add_model.
    colo_model.add_script("test_script1", script_path=torch_script, device="CPU")

    # The ensemble holds one ensemble-level script.
    assert len(colo_ensemble._db_scripts) == 1
    # Every member (replicas and the added model) ends up with two scripts.
    assert all([len(entity._db_scripts) == 2 for entity in colo_ensemble])

    exp.start(colo_ensemble, block=True)
    statuses = exp.get_status(colo_ensemble)
    assert all([stat == status.STATUS_COMPLETED for stat in statuses])
@pytest.mark.skipif(not should_run, reason="Test needs Torch to run")
def test_colocated_db_script_ensemble_reordered(fileutils, wlmutils):
    """Test DB Scripts on colocated DB from ensemble, first adding the
    script to the ensemble, then colocating the DB"""
    exp_name = "test-colocated-db-script"
    exp = Experiment(exp_name, launcher="local")

    # get test setup
    test_dir = fileutils.make_test_dir()
    sr_test_script = fileutils.get_test_conf_path("run_dbscript_smartredis.py")
    torch_script = fileutils.get_test_conf_path("torchscript.py")

    # Two-replica ensemble plus a standalone model that joins it later.
    colo_settings = exp.create_run_settings(exe=sys.executable, exe_args=sr_test_script)
    colo_ensemble = exp.create_ensemble(
        "colocated_ensemble", run_settings=colo_settings, replicas=2
    )
    colo_ensemble.set_path(test_dir)

    colo_model = exp.create_model("colocated_model", colo_settings)
    colo_model.set_path(test_dir)

    # Reversed order vs. the test above: attach the ensemble-wide script BEFORE
    # any database is colocated.
    torch_script_str = "def negate(x):\n\treturn torch.neg(x)\n"
    colo_ensemble.add_script("test_script2", script=torch_script_str, device="CPU")

    for i, entity in enumerate(colo_ensemble):
        entity.disable_key_prefixing()
        entity.colocate_db(
            port=wlmutils.get_test_port() + i, db_cpus=1, limit_app_cpus=False, debug=True, ifname="lo"
        )
        entity.add_script("test_script1", script_path=torch_script, device="CPU")

    # The standalone model gets the next port after the replicas.
    colo_model.colocate_db(
        port=wlmutils.get_test_port() + len(colo_ensemble),
        db_cpus=1,
        limit_app_cpus=False,
        debug=True,
        ifname="lo",
    )

    colo_ensemble.add_model(colo_model)
    # NOTE(review): the late-added model presumably inherits the ensemble-level
    # script, so only the per-entity one is added here — confirm against
    # Ensemble.add_model.
    colo_model.add_script("test_script1", script_path=torch_script, device="CPU")

    # The ensemble holds one ensemble-level script.
    assert len(colo_ensemble._db_scripts) == 1
    # Every member (replicas and the added model) ends up with two scripts.
    assert all([len(entity._db_scripts) == 2 for entity in colo_ensemble])

    exp.start(colo_ensemble, block=True)
    statuses = exp.get_status(colo_ensemble)
    assert all([stat == status.STATUS_COMPLETED for stat in statuses])
@pytest.mark.skipif(not should_run, reason="Test needs Torch to run")
def test_db_script_errors(fileutils, wlmutils):
    """Test DB Scripts error when setting a serialized function on colocated DB"""
    exp_name = "test-colocated-db-script"
    exp = Experiment(exp_name, launcher="local")

    # get test setup
    test_dir = fileutils.make_test_dir()
    sr_test_script = fileutils.get_test_conf_path("run_dbscript_smartredis.py")

    # Case 1: adding a serialized function to an already-colocated model
    # must raise SSUnsupportedError.
    colo_settings = exp.create_run_settings(exe=sys.executable, exe_args=sr_test_script)
    colo_model = exp.create_model("colocated_model", colo_settings)
    colo_model.set_path(test_dir)
    colo_model.colocate_db(
        port=wlmutils.get_test_port(), db_cpus=1, limit_app_cpus=False, debug=True, ifname="lo"
    )
    with pytest.raises(SSUnsupportedError):
        colo_model.add_function("test_func", function=timestwo, device="CPU")

    # Case 2: same failure at the ensemble level when every member is colocated.
    colo_settings = exp.create_run_settings(exe=sys.executable, exe_args=sr_test_script)
    colo_ensemble = exp.create_ensemble(
        "colocated_ensemble", run_settings=colo_settings, replicas=2
    )
    colo_ensemble.set_path(test_dir)
    for i, entity in enumerate(colo_ensemble):
        entity.colocate_db(
            port=wlmutils.get_test_port() + i, db_cpus=1, limit_app_cpus=False, debug=True, ifname="lo"
        )
    with pytest.raises(SSUnsupportedError):
        colo_ensemble.add_function("test_func", function=timestwo, device="CPU")

    # Case 3: the reverse order — function attached first, colocation after —
    # must fail per entity, and adding a colocated model must fail too.
    colo_settings = exp.create_run_settings(exe=sys.executable, exe_args=sr_test_script)
    colo_ensemble = exp.create_ensemble(
        "colocated_ensemble", run_settings=colo_settings, replicas=2
    )
    colo_ensemble.set_path(test_dir)
    colo_ensemble.add_function("test_func", function=timestwo, device="CPU")
    for i, entity in enumerate(colo_ensemble):
        with pytest.raises(SSUnsupportedError):
            entity.colocate_db(
                port=wlmutils.get_test_port() + i, db_cpus=1, limit_app_cpus=False, debug=True, ifname="lo"
            )
    with pytest.raises(SSUnsupportedError):
        colo_ensemble.add_model(colo_model)
| 35.691824
| 107
| 0.728458
| 1,605
| 11,350
| 4.85919
| 0.084735
| 0.044621
| 0.021541
| 0.03103
| 0.888704
| 0.875882
| 0.865624
| 0.851391
| 0.847545
| 0.828696
| 0
| 0.00371
| 0.168899
| 11,350
| 317
| 108
| 35.804416
| 0.823068
| 0.093216
| 0
| 0.714286
| 0
| 0
| 0.114414
| 0.024643
| 0
| 0
| 0
| 0
| 0.066327
| 1
| 0.035714
| false
| 0
| 0.035714
| 0.005102
| 0.076531
| 0.005102
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c3ce4562e38da3d87475f9b00208c5001e46cc28
| 45
|
py
|
Python
|
angr/utils/__init__.py
|
vwvw/angr
|
956b75c1908b786d51a5af3a29a96b5ef35c756e
|
[
"BSD-2-Clause"
] | 2
|
2019-12-20T13:42:57.000Z
|
2021-07-07T09:34:46.000Z
|
angr/utils/__init__.py
|
vwvw/angr
|
956b75c1908b786d51a5af3a29a96b5ef35c756e
|
[
"BSD-2-Clause"
] | 1
|
2019-04-08T12:10:07.000Z
|
2019-04-08T12:10:07.000Z
|
angr/utils/__init__.py
|
vwvw/angr
|
956b75c1908b786d51a5af3a29a96b5ef35c756e
|
[
"BSD-2-Clause"
] | null | null | null |
from . import graph
from . import constants
| 11.25
| 23
| 0.755556
| 6
| 45
| 5.666667
| 0.666667
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 45
| 3
| 24
| 15
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7f0b2f14b6bde2c09e0fad6c1033ffbf1527396e
| 1,365
|
py
|
Python
|
examples/structureData/flagellar/0-coot-history.py
|
anapgsilva/rt1
|
c1b61e7081952f1c3053322becac47b649ffa216
|
[
"MIT"
] | 5
|
2018-06-27T08:54:25.000Z
|
2020-12-03T14:54:15.000Z
|
examples/structureData/flagellar/0-coot-history.py
|
anapgsilva/rt1
|
c1b61e7081952f1c3053322becac47b649ffa216
|
[
"MIT"
] | null | null | null |
examples/structureData/flagellar/0-coot-history.py
|
anapgsilva/rt1
|
c1b61e7081952f1c3053322becac47b649ffa216
|
[
"MIT"
] | 1
|
2019-08-02T03:23:26.000Z
|
2019-08-02T03:23:26.000Z
|
# Auto-generated Coot session history: a replayable log of GUI/state calls
# recorded during interactive model building. Three session fragments appear
# to be appended back-to-back (each starts with the same state queries).
# NOTE(review): contains a `;;` scheme line near the end, so this file is not
# pure Python despite its .py extension — it is meant for Coot's interpreter.

# --- Session fragment 1: state queries and display preferences ---
filter_fileselection_filenames_state ()
get_active_map_drag_flag ()
use_graphics_interface_state ()
set_display_intro_string ("Good Morning Calsmi, Welcome to Coot. (revision 5348)
[with guile 1.8.8 embedded]
[with python 2.7.9 embedded]")
set_filter_fileselection_filenames (1)
unset_sticky_sort_by_date ()
set_colour_map_rotation_on_read_pdb (21.00)
set_colour_map_rotation_on_read_pdb_c_only_flag (1)
set_density_size (15.00)
set_swap_difference_map_colours (0)
set_colour_map_rotation_for_map (14.00)
set_active_map_drag_flag (0)
set_idle_function_rotate_angle ( 1.50)

# --- Session fragment 2: same preference block with a different intro string ---
filter_fileselection_filenames_state ()
get_active_map_drag_flag ()
use_graphics_interface_state ()
set_display_intro_string ("Good morning Callum. Welcome to Coot")
set_display_lists_for_maps (0)
set_filter_fileselection_filenames (1)
unset_sticky_sort_by_date ()
set_colour_map_rotation_on_read_pdb (21.00)
set_colour_map_rotation_on_read_pdb_c_only_flag (1)
set_density_size (15.00)
set_swap_difference_map_colours (0)
set_colour_map_rotation_for_map (14.00)
set_active_map_drag_flag (0)
set_idle_function_rotate_angle ( 1.50)

# --- Session fragment 3: load a model, then exit and save state ---
filter_fileselection_filenames_state ()
get_active_map_drag_flag ()
handle_read_draw_molecule_with_recentre ("shaftTrunc.pdb", 1)
use_graphics_interface_state ()
coot_checked_exit (0)
;; # DIRECT SCHEME ((clear-backups-maybe))
stereo_mode_state ()
save_state ()
| 35.921053
| 80
| 0.852015
| 226
| 1,365
| 4.561947
| 0.349558
| 0.052376
| 0.069835
| 0.116392
| 0.751697
| 0.751697
| 0.751697
| 0.751697
| 0.751697
| 0.751697
| 0
| 0.040189
| 0.07033
| 1,365
| 37
| 81
| 36.891892
| 0.772262
| 0.027106
| 0
| 0.72973
| 0
| 0
| 0.037707
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
613223f2499bd9b5ecb653aa5f2e0f8d0bbf1cd8
| 3,952
|
py
|
Python
|
DynamicProgramme/MaximumSubarray_53.py
|
RunningMartin/LeetCode
|
ec4ff24bfd8658b733496452478114ad69872c0f
|
[
"MIT"
] | null | null | null |
DynamicProgramme/MaximumSubarray_53.py
|
RunningMartin/LeetCode
|
ec4ff24bfd8658b733496452478114ad69872c0f
|
[
"MIT"
] | null | null | null |
DynamicProgramme/MaximumSubarray_53.py
|
RunningMartin/LeetCode
|
ec4ff24bfd8658b733496452478114ad69872c0f
|
[
"MIT"
] | null | null | null |
from typing import List, Tuple
"""
https://leetcode-cn.com/problems/maximum-subarray/
"""
class Solution:
    """Solutions to LeetCode 53 (Maximum Subarray), from brute force up to
    Kadane's algorithm.

    https://leetcode-cn.com/problems/maximum-subarray/
    """

    @staticmethod
    def max_sub_array_v1(nums: List[int]) -> Tuple[int, List]:
        """Brute force: enumerate every contiguous subarray and keep the best.

        A running sum is maintained while extending each subarray, so this is
        O(n^2) time and O(1) extra space. (The original materialized every
        subarray and re-summed each one, which was O(n^3) time / O(n^2) space
        despite its docstring's O(n^2) claim.)

        :param nums: input sequence
        :return: (maximum subarray sum, that subarray); ``(0, [])`` if empty
        """
        if not nums:
            return 0, []
        best_sum = nums[0]
        best_start, best_stop = 0, 1
        for start in range(len(nums)):
            running = 0
            for stop in range(start, len(nums)):
                running += nums[stop]
                # Strictly greater keeps the earliest subarray on ties,
                # matching the original enumeration order.
                if running > best_sum:
                    best_sum = running
                    best_start, best_stop = start, stop + 1
        return best_sum, nums[best_start:best_stop]

    @staticmethod
    def max_sub_array_sum_v1(nums: List[int]) -> int:
        """Brute-force variant of :meth:`max_sub_array_v1` returning only the sum.

        O(n^2) time, O(1) space.

        :param nums: input sequence
        :return: maximum subarray sum; ``0`` if empty
        """
        if not nums:
            return 0
        best_sum = nums[0]
        for start in range(len(nums)):
            running = 0
            for stop in range(start, len(nums)):
                running += nums[stop]
                if running > best_sum:
                    best_sum = running
        return best_sum

    @staticmethod
    def max_sub_array_v2(nums: List[int]) -> Tuple[int, List]:
        """Pruned scan: restart the candidate subarray whenever its running sum
        drops below zero — a negative prefix can never improve a later subarray.

        The original stored a copy of every candidate and re-summed it, making
        it quadratic; tracking indices and a running sum makes this truly
        O(n) time and O(1) extra space, as the docstring always claimed.

        :param nums: input sequence
        :return: (maximum subarray sum, that subarray); ``(0, [])`` if empty
        """
        if not nums:
            return 0, []
        best_sum = None  # None until the first candidate initializes it
        best_span = (0, 1)
        start = 0
        running = 0
        for stop, value in enumerate(nums):
            running += value
            if best_sum is None or running > best_sum:
                best_sum = running
                best_span = (start, stop + 1)
            if running < 0:
                # Drop the negative prefix and restart just after it.
                start = stop + 1
                running = 0
        lo, hi = best_span
        return best_sum, nums[lo:hi]

    @staticmethod
    def max_sub_array_sum_v2(nums: List[int]) -> int:
        """Pruned-scan variant of :meth:`max_sub_array_v2` returning only the sum.

        O(n) time, O(1) space.

        :param nums: input sequence
        :return: maximum subarray sum; ``0`` if empty
        """
        if not nums:
            return 0
        best_sum = None
        running = 0
        for value in nums:
            running += value
            if best_sum is None or running > best_sum:
                best_sum = running
            if running < 0:
                running = 0
        return best_sum

    @staticmethod
    def max_sub_array_v3(nums: List[int]) -> int:
        """Kadane's algorithm: O(n) time, O(1) space.

        Bug fix vs. the original: this version no longer rewrites ``nums`` in
        place. The old implementation destructively replaced each element with
        an accumulated sum, surprising any caller that reuses the list.

        :param nums: input sequence
        :return: maximum subarray sum; ``0`` if empty
        """
        if not nums:
            return 0
        best_sum = running = nums[0]
        for value in nums[1:]:
            # Extend the previous run only while it contributes non-negatively.
            running = running + value if running >= 0 else value
            best_sum = max(best_sum, running)
        return best_sum
def test():
    """Smoke-test every solver variant against the canonical LeetCode example."""
    sample = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    expected_best = (6, [4, -1, 2, 1])
    assert Solution.max_sub_array_v1(sample) == expected_best
    assert Solution.max_sub_array_sum_v1(sample) == 6
    assert Solution.max_sub_array_v2(sample) == expected_best
    assert Solution.max_sub_array_sum_v2(sample) == 6
    # NOTE(review): v3 looks like it may rewrite `sample` in place,
    # so keep it as the last assertion.
    assert Solution.max_sub_array_v3(sample) == 6


if __name__ == '__main__':
    test()
| 26.346667
| 65
| 0.515435
| 508
| 3,952
| 3.816929
| 0.137795
| 0.09902
| 0.090768
| 0.043321
| 0.85508
| 0.811759
| 0.779783
| 0.74884
| 0.74884
| 0.729758
| 0
| 0.028457
| 0.368674
| 3,952
| 149
| 66
| 26.52349
| 0.748697
| 0.144737
| 0
| 0.758621
| 0
| 0
| 0.002654
| 0
| 0
| 0
| 0
| 0
| 0.057471
| 1
| 0.068966
| false
| 0
| 0.011494
| 0
| 0.206897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f63fed701e5200b0df2fa4e5f244af6d43635e70
| 23,238
|
py
|
Python
|
lib/player/test_lib.py
|
jPhy/Gomoku
|
019a672c080c4d9f390d4d2765453780ef2cea63
|
[
"MIT"
] | 7
|
2016-06-10T17:35:42.000Z
|
2020-01-03T09:51:19.000Z
|
lib/player/test_lib.py
|
jPhy/Gomoku
|
019a672c080c4d9f390d4d2765453780ef2cea63
|
[
"MIT"
] | null | null | null |
lib/player/test_lib.py
|
jPhy/Gomoku
|
019a672c080c4d9f390d4d2765453780ef2cea63
|
[
"MIT"
] | null | null | null |
"Unit tests for the Random player"
import unittest
import numpy as np
from ..board import Board, black, white, empty
from ..gui import BoardGui, tk
from .lib import Playerlibrary, PlayerTest
# Convenience aliases for the PlayerTest helpers used throughout this module.
build_gui = PlayerTest.build_gui
build_board = PlayerTest.build_board
class TestBuildBoard(unittest.TestCase):
    """Sanity checks for the ``build_board`` helper."""

    def test_ensure_no_winner(self):
        # A complete row of white stones is already a winning line, so the
        # helper must refuse to construct this position.
        layout = [
            [empty, black, empty, black, empty],
            [white, white, white, white, white],
            [black, empty, black, empty, black],
        ]
        self.assertRaises(AssertionError, build_board, layout)

    def test_build_equal_black_white(self):
        # Four black and four white stones: a balanced, winner-free position.
        layout = [
            [empty, black, empty, empty, empty, empty, empty, empty, empty, empty],
            [empty, empty, empty, empty, empty, empty, empty, empty, empty, empty],
            [white, empty, empty, empty, empty, empty, white, empty, empty, empty],
            [white, empty, empty, empty, empty, white, empty, empty, empty, empty],
            [black, empty, empty, empty, empty, empty, empty, empty, empty, empty],
            [empty, empty, empty, empty, empty, black, black, empty, empty, empty],
        ]
        constructed = build_board(layout)
        np.testing.assert_equal(constructed.board, np.array(layout))
        # Eight of the 10 * 6 fields are occupied.
        self.assertEqual(constructed.moves_left, 10 * 6 - 8)

    def test_build_one_more_white_than_black(self):
        # Same position with one extra white stone (nine stones total).
        layout = [
            [empty, black, empty, empty, empty, empty, empty, empty, empty, empty],
            [empty, empty, empty, empty, empty, empty, empty, empty, empty, empty],
            [white, empty, empty, empty, empty, empty, white, empty, empty, empty],
            [white, empty, empty, empty, empty, white, empty, white, empty, empty],
            [black, empty, empty, empty, empty, empty, empty, empty, empty, empty],
            [empty, empty, empty, empty, empty, black, black, empty, empty, empty],
        ]
        constructed = build_board(layout)
        np.testing.assert_equal(constructed.board, np.array(layout))
        self.assertEqual(constructed.moves_left, 10 * 6 - 9)
class TestPlayerlibrary(unittest.TestCase):
def setUp(self):
np.random.seed(894763834)
self.white_player = Playerlibrary()
self.white_player.color = white
self.black_player = Playerlibrary()
self.black_player.color = black
def test_win_if_possible(self):
boards_to_test = \
[
# row - white
[[white, white, empty, white, white, empty, empty, empty, empty, empty],
[black, empty, black, black, black, empty, empty, empty, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty]],
# column - black
[[white, black, white, empty, empty, empty, empty, empty, empty, empty],
[empty, black, empty, empty, empty, empty, empty, empty, empty, empty],
[white, empty, empty, empty, empty, empty, empty, empty, empty, empty],
[white, black, empty, empty, empty, empty, empty, empty, empty, empty],
[white, black, empty, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty]],
# diagonal - black
[[white, white, empty, empty, empty, white, empty, empty, empty, empty],
[empty, black, empty, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, black, white, empty, empty, empty, empty, empty, empty],
[empty, white, empty, black, empty, empty, empty, empty, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, empty, empty, empty, black, empty, empty, empty, empty]],
# diagonal - white
[[empty, empty, empty, empty, empty, black, empty, empty, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, white, empty, empty],
[empty, black, empty, empty, empty, empty, white, empty, empty, empty],
[empty, empty, empty, empty, empty, white, empty, empty, black, empty],
[empty, empty, empty, empty, white, empty, black, empty, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty]],
# not immediately winnable - white
[[empty, empty, empty, empty, empty, black, empty, empty, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, white, empty, empty],
[empty, black, empty, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, white, empty, empty, white, empty, empty, black, empty],
[empty, empty, empty, empty, white, empty, black, empty, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty]]
]
for board_array in boards_to_test[:-1]:
gui = build_gui(board_array)
if gui.board.in_turn == white:
return_value = self.white_player.win_if_possible(gui)
self.assertEqual(gui.board.winner()[0], white)
elif gui.board.in_turn == black:
return_value = self.black_player.win_if_possible(gui)
self.assertEqual(gui.board.winner()[0], black)
else:
raise RuntimeError('FATAL ERROR')
self.assertTrue(return_value)
gui = build_gui(boards_to_test[-1])
return_value = self.black_player.win_if_possible(gui)
self.assertFalse(return_value)
self.assertEqual(gui.board.moves_left, 52)
def test_random_move(self):
height = 10
width = 20
gui = build_gui(np.zeros((height,width)))
total_number_of_fields = height * width
assert total_number_of_fields % 2 == 0 # need an even number for following loop
for i in range(total_number_of_fields // 2):
self.assertEqual(gui.board.board.sum(), 0)
self.white_player.random_move(gui)
self.assertEqual(gui.board.board.sum(), white)
self.black_player.random_move(gui)
self.assertEqual(gui.board.board.sum(), 0)
def test_extend_one(self):
gui = build_gui([[empty, empty, empty],
[empty, black, empty],
[empty, empty, white],
[empty, empty, empty],
[empty, empty, empty]])
target_board_after_moves = np.array \
(
[[empty, empty, empty],
[empty, black, white],
[empty, empty, white],
[empty, empty, empty],
[empty, empty, empty]]
)
white_return = self.white_player.extend_one(gui)
self.assertTrue(white_return)
np.testing.assert_equal(gui.board.board, target_board_after_moves)
def test_extend_three_to_four(self):
gui = build_gui([[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, black, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, black, empty, empty, empty, white, white, empty, empty],
[empty, empty, empty, empty, white, empty, empty, white, empty, empty],
[empty, black, black, empty, empty, empty, empty, white, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty]])
target_board_after_moves = np.array \
(
[[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, black, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, black, empty, empty, empty, white, white, empty, empty],
[empty, empty, black, empty, white, empty, empty, white, empty, empty],
[empty, black, black, empty, empty, empty, empty, white, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty]]
)
black_return = self.black_player.extend_three_to_four(gui)
self.assertTrue(black_return)
np.testing.assert_equal(gui.board.board, target_board_after_moves)
def test_block_to_doubly_open_four(self):
gui = build_gui([[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, black, empty, empty, empty, empty, black, empty, empty],
[empty, empty, black, empty, empty, empty, empty, white, empty, empty],
[empty, empty, empty, empty, white, empty, empty, white, empty, empty],
[empty, empty, black, empty, empty, empty, empty, white, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty]])
target_board_after_moves = np.array \
(
[[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, black, empty, empty, empty, empty, black, empty, empty],
[empty, empty, black, empty, empty, empty, empty, white, empty, empty],
[empty, empty, white, empty, white, empty, empty, white, empty, empty],
[empty, empty, black, empty, empty, empty, empty, white, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty]]
)
white_return = self.white_player.block_to_doubly_open_four(gui)
self.assertTrue(white_return)
np.testing.assert_equal(gui.board.board, target_board_after_moves)
def test_extend_three_to_doubly_open_four(self):
gui = build_gui([[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, black, empty, empty, empty, empty, black, empty, empty],
[empty, empty, black, empty, empty, empty, white, white, empty, empty],
[empty, empty, empty, empty, white, empty, empty, white, empty, empty],
[empty, empty, black, empty, empty, empty, empty, white, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty]])
target_board_after_moves = np.array \
(
[[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, black, empty, empty, empty, empty, black, empty, empty],
[empty, empty, black, empty, empty, empty, white, white, empty, empty],
[empty, empty, black, empty, white, empty, empty, white, empty, empty],
[empty, empty, black, empty, empty, empty, empty, white, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty]]
)
black_return = self.black_player.extend_three_to_doubly_open_four(gui)
self.assertTrue(black_return)
np.testing.assert_equal(gui.board.board, target_board_after_moves)
white_return = self.white_player.extend_three_to_doubly_open_four(gui)
self.assertFalse(white_return)
np.testing.assert_equal(gui.board.board, target_board_after_moves)
def test_block_open_four(self):
gui = build_gui([[empty, empty, empty, empty, white, empty, empty, empty, empty, empty],
[empty, empty, black, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, white, black, white, empty, empty, empty, empty, empty],
[empty, empty, empty, empty, black, empty, empty, empty, empty, empty],
[empty, empty, empty, empty, empty, black, empty, empty, empty, empty],
[empty, empty, empty, empty, empty, white, empty, empty, empty, empty]])
target_board_after_moves = np.array \
(
[[empty, white, empty, empty, white, empty, empty, empty, empty, empty],
[empty, empty, black, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, white, black, white, empty, empty, empty, empty, empty],
[empty, empty, empty, empty, black, empty, empty, empty, empty, empty],
[empty, empty, empty, empty, empty, black, empty, empty, empty, empty],
[empty, empty, empty, empty, empty, white, empty, empty, empty, empty]]
)
white_return = self.white_player.block_open_four(gui)
self.assertTrue(white_return)
np.testing.assert_equal(gui.board.board, target_board_after_moves)
def test_block_open_two(self):
gui = build_gui([[empty, empty, empty, empty, empty],
[empty, empty, black, black, empty],
[empty, white, white, empty, empty],
[empty, empty, empty, empty, empty]])
target_board_after_moves = np.array \
(
[[empty, empty, empty, empty, empty],
[empty, white, black, black, empty],
[empty, white, white, empty, empty],
[empty, empty, empty, empty, empty]]
)
white_return = self.white_player.block_open_two(gui)
self.assertTrue(white_return)
np.testing.assert_equal(gui.board.board, target_board_after_moves)
def test_block_doubly_open_two(self):
gui = build_gui([[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, black, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, black, empty, empty, empty, white, empty, empty, empty],
[empty, empty, empty, empty, white, empty, empty, empty, empty, empty],
[empty, black, empty, empty, empty, empty, empty, white, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty]])
target_board_after_moves = np.array \
(
[[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, black, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, black, empty, empty, empty, white, empty, empty, empty],
[empty, empty, white, empty, white, empty, empty, empty, empty, empty],
[empty, black, empty, empty, empty, empty, empty, white, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty]]
)
white_return = self.white_player.block_doubly_open_two(gui)
black_return = self.black_player.block_doubly_open_two(gui)
self.assertTrue(white_return) # white can block open two
self.assertFalse(black_return) # black has no open two to block
np.testing.assert_equal(gui.board.board, target_board_after_moves)
def test_block_doubly_open_three(self):
gui = build_gui([[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, white, black, empty, empty, empty, empty, empty, empty],
[empty, empty, empty, white, black, empty, empty, empty, empty, empty],
[empty, empty, empty, empty, white, black, empty, empty, empty, empty],
[empty, empty, empty, empty, empty, empty, white, empty, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty]])
target_board_after_moves = np.array \
(
[[empty, black, empty, empty, empty, empty, empty, empty, empty, empty],
[empty, empty, white, black, empty, empty, empty, empty, empty, empty],
[empty, empty, empty, white, black, empty, empty, empty, empty, empty],
[empty, empty, empty, empty, white, black, empty, empty, empty, empty],
[empty, empty, empty, empty, empty, empty, white, empty, empty, empty],
[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty]]
)
black_return = self.black_player.block_doubly_open_three(gui)
white_return = self.white_player.block_doubly_open_three(gui)
self.assertTrue(black_return) # white has a doubly open line of three to be blocked
np.testing.assert_equal(gui.board.board, target_board_after_moves)
self.assertFalse(white_return) # black's line of three is already blocked on one side
np.testing.assert_equal(gui.board.board, target_board_after_moves)
def test_block_open_three(self):
    """Black should block white's gapped diagonal run of three stones."""
    game = build_gui(
        [[empty, empty, empty, empty, empty, empty, empty, empty, empty, empty],
         [empty, empty, white, empty, black, empty, empty, empty, empty, empty],
         [empty, empty, empty, white, empty, empty, empty, empty, empty, empty],
         [empty, empty, empty, empty, white, empty, empty, black, empty, empty],
         [empty, empty, empty, empty, empty, empty, empty, empty, empty, empty],
         [empty, empty, empty, empty, empty, empty, empty, empty, empty, empty]])
    # Expected result: black caps the diagonal at (0, 1); nothing else changes.
    expected_board = np.array(
        [[empty, black, empty, empty, empty, empty, empty, empty, empty, empty],
         [empty, empty, white, empty, black, empty, empty, empty, empty, empty],
         [empty, empty, empty, white, empty, empty, empty, empty, empty, empty],
         [empty, empty, empty, empty, white, empty, empty, black, empty, empty],
         [empty, empty, empty, empty, empty, empty, empty, empty, empty, empty],
         [empty, empty, empty, empty, empty, empty, empty, empty, empty, empty]])
    self.assertTrue(self.black_player.block_open_three(game))
    np.testing.assert_equal(game.board.board, expected_board)
def test_extend_two_to_three(self):
    """Black should grow one of its pairs into a line of three."""
    game = build_gui(
        [[empty, empty, empty, empty, empty, empty, empty, white, empty, empty],
         [empty, empty, black, empty, empty, empty, empty, empty, empty, empty],
         [empty, empty, empty, empty, empty, empty, white, empty, empty, empty],
         [empty, empty, black, empty, white, empty, empty, empty, empty, empty],
         [empty, black, empty, empty, empty, empty, empty, white, empty, empty],
         [empty, empty, empty, empty, empty, empty, empty, empty, empty, empty]])
    # Expected result: black plays at (4, 2), extending its pair to a three.
    expected_board = np.array(
        [[empty, empty, empty, empty, empty, empty, empty, white, empty, empty],
         [empty, empty, black, empty, empty, empty, empty, empty, empty, empty],
         [empty, empty, empty, empty, empty, empty, white, empty, empty, empty],
         [empty, empty, black, empty, white, empty, empty, empty, empty, empty],
         [empty, black, black, empty, empty, empty, empty, white, empty, empty],
         [empty, empty, empty, empty, empty, empty, empty, empty, empty, empty]])
    self.assertTrue(self.black_player.extend_two_to_three(game))
    np.testing.assert_equal(game.board.board, expected_board)
def test_block_twice_to_three_or_more(self):
    """White should block the intersection where black could build two
    separate lines of three (or more) with a single move."""
    game = build_gui(
        [[empty, empty, white, empty, empty, empty, empty, empty],
         [empty, white, empty, white, white, empty, empty, empty],
         [empty, black, empty, white, empty, empty, empty, empty],
         [empty, empty, empty, empty, empty, empty, empty, empty],
         [empty, empty, black, black, empty, empty, empty, empty],
         [empty, empty, black, empty, black, empty, empty, empty]])
    # Expected result: white occupies the double-threat point at (3, 2).
    expected_board = np.array(
        [[empty, empty, white, empty, empty, empty, empty, empty],
         [empty, white, empty, white, white, empty, empty, empty],
         [empty, black, empty, white, empty, empty, empty, empty],
         [empty, empty, white, empty, empty, empty, empty, empty],
         [empty, empty, black, black, empty, empty, empty, empty],
         [empty, empty, black, empty, black, empty, empty, empty]])
    self.assertTrue(self.white_player.block_twice_to_three_or_more(game))
    np.testing.assert_equal(game.board.board, expected_board)
def test_extend_twice_two_to_three(self):
    """Black should pick the move that extends two of its pairs to threes
    at once."""
    game = build_gui(
        [[empty, empty, empty, white, empty, empty, empty, empty],
         [empty, empty, empty, empty, empty, empty, empty, empty],
         [empty, white, empty, black, empty, empty, black, empty],
         [empty, empty, empty, empty, black, empty, white, empty],
         [empty, empty, empty, white, black, empty, empty, empty],
         [empty, empty, white, empty, empty, empty, empty, empty]])
    # Expected result: black plays at (2, 4), the double-extension point.
    expected_board = np.array(
        [[empty, empty, empty, white, empty, empty, empty, empty],
         [empty, empty, empty, empty, empty, empty, empty, empty],
         [empty, white, empty, black, black, empty, black, empty],
         [empty, empty, empty, empty, black, empty, white, empty],
         [empty, empty, empty, white, black, empty, empty, empty],
         [empty, empty, white, empty, empty, empty, empty, empty]])
    self.assertTrue(self.black_player.extend_twice_two_to_three(game))
    np.testing.assert_equal(game.board.board, expected_board)
| 56.816626
| 97
| 0.563517
| 2,601
| 23,238
| 4.908497
| 0.045752
| 0.922691
| 1.154931
| 1.264197
| 0.898253
| 0.891909
| 0.879377
| 0.873502
| 0.852197
| 0.845461
| 0
| 0.00203
| 0.321671
| 23,238
| 408
| 98
| 56.955882
| 0.807905
| 0.015148
| 0
| 0.551205
| 0
| 0
| 0.001877
| 0
| 0
| 0
| 0
| 0
| 0.129518
| 1
| 0.054217
| false
| 0
| 0.01506
| 0
| 0.075301
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
f66be853eb44e8c342bb92218df3bfc732976f3f
| 101
|
py
|
Python
|
openrec/utils/__init__.py
|
BoData-Bot/openrec
|
3d655d21b762b40d50e53cea96d7802fd49c74ad
|
[
"Apache-2.0"
] | null | null | null |
openrec/utils/__init__.py
|
BoData-Bot/openrec
|
3d655d21b762b40d50e53cea96d7802fd49c74ad
|
[
"Apache-2.0"
] | null | null | null |
openrec/utils/__init__.py
|
BoData-Bot/openrec
|
3d655d21b762b40d50e53cea96d7802fd49c74ad
|
[
"Apache-2.0"
] | null | null | null |
from openrec.utils.dataset import Dataset
from openrec.utils.implicit_dataset import ImplicitDataset
| 33.666667
| 58
| 0.881188
| 13
| 101
| 6.769231
| 0.538462
| 0.25
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079208
| 101
| 2
| 59
| 50.5
| 0.946237
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9c8572de03e0734a64522963252460ad6f544273
| 14,548
|
py
|
Python
|
q2_cutadapt/tests/test_trim.py
|
andrewsanchez/q2-cutadapt
|
15735d12d4553e85c34474175c4daabc31e44cb4
|
[
"BSD-3-Clause"
] | null | null | null |
q2_cutadapt/tests/test_trim.py
|
andrewsanchez/q2-cutadapt
|
15735d12d4553e85c34474175c4daabc31e44cb4
|
[
"BSD-3-Clause"
] | null | null | null |
q2_cutadapt/tests/test_trim.py
|
andrewsanchez/q2-cutadapt
|
15735d12d4553e85c34474175c4daabc31e44cb4
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2017-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import gzip
import itertools
import os
import unittest
import pandas as pd
from q2_cutadapt._trim import _build_trim_command
from q2_types.per_sample_sequences import (
CasavaOneEightSingleLanePerSampleDirFmt,
SingleLanePerSampleSingleEndFastqDirFmt,
SingleLanePerSamplePairedEndFastqDirFmt,
FastqGzFormat,
)
from qiime2 import Artifact
from qiime2.util import redirected_stdio
from qiime2.plugin.testing import TestPluginBase
class TestTrimSingle(TestPluginBase):
    """End-to-end tests for the ``trim_single`` plugin action.

    Runs cutadapt on the ``single-end`` fixture data and compares the
    trimmed reads against the untrimmed originals, record by record.
    """

    package = 'q2_cutadapt.tests'

    # This test is really just to make sure that the command runs - the
    # detailed tests in the Util Tests below ensure the commands are crafted
    # appropriately.
    def test_typical(self):
        demuxed_art = Artifact.import_data('SampleData[SequencesWithQuality]',
                                           self.get_data_path('single-end'))
        adapter = ['TACGGAGGATCC']
        with redirected_stdio(stdout=os.devnull):
            obs_art, = self.plugin.methods['trim_single'](demuxed_art,
                                                          front=adapter)
        demuxed = demuxed_art.view(SingleLanePerSampleSingleEndFastqDirFmt)
        demuxed_seqs = demuxed.sequences.iter_views(FastqGzFormat)
        obs = obs_art.view(SingleLanePerSampleSingleEndFastqDirFmt)
        obs_seqs = obs.sequences.iter_views(FastqGzFormat)
        # Iterate over each sample, side-by-side
        for (_, exp_fp), (_, obs_fp) in zip(demuxed_seqs, obs_seqs):
            exp_fh = gzip.open(str(exp_fp), 'rt')
            obs_fh = gzip.open(str(obs_fp), 'rt')
            # Iterate over expected and observed reads, side-by-side.
            # zip_longest with *[fh] * 4 yields one 4-line FASTQ record
            # (header, sequence, '+', quality) from each file per iteration.
            for records in itertools.zip_longest(*[exp_fh] * 4, *[obs_fh] * 4):
                (exp_seq_h, exp_seq, _, exp_qual,
                 obs_seq_h, obs_seq, _, obs_qual) = records
                # Make sure cutadapt hasn't shuffled the read order
                self.assertEqual(exp_seq_h, obs_seq_h)
                self.assertTrue(obs_seq in exp_seq)
                # The adapter should not be present in the trimmed seqs
                self.assertTrue('TACGGAGGATCC' not in obs_seq)
                self.assertTrue(obs_qual in exp_qual)
                # Make sure cutadapt trimmed the quality scores, too
                self.assertEqual(len(obs_seq), len(obs_qual))
            exp_fh.close(), obs_fh.close()

    def test_min_length(self):
        # Verifies that reads trimmed down to nothing are dropped entirely
        # (cutadapt's minimum-length behavior), not emitted as empty records.
        demuxed_art = Artifact.import_data('SampleData[SequencesWithQuality]',
                                           self.get_data_path('single-end'))
        # The following "adapter" has been picked specifically to remove
        # the entire sequence with the ID @HWI-EAS440_0386:1:28:6491:1375#0/1.
        adapter = ['GGGGGGATCGGGGGCG']
        empty_seq_id = '@HWI-EAS440_0386:1:28:6491:1375#0/1'
        with redirected_stdio(stdout=os.devnull):
            obs_art, = self.plugin.methods['trim_single'](demuxed_art,
                                                          adapter=adapter)
        obs = obs_art.view(SingleLanePerSampleSingleEndFastqDirFmt)
        for _, obs_fp in obs.sequences.iter_views(FastqGzFormat):
            with gzip.open(str(obs_fp), 'rt') as obs_fh:
                for record in itertools.zip_longest(*[obs_fh] * 4):
                    self.assertTrue(record[0] != empty_seq_id)
class TestTrimPaired(TestPluginBase):
    """End-to-end tests for the ``trim_paired`` plugin action.

    NOTE(review): the paired-end artifacts below are viewed through the
    *Single*-end directory format. Presumably this works because that view
    still iterates every fastq.gz in the directory, and the fixture's
    forward/reverse reads are identical — confirm this is intentional.
    """

    package = 'q2_cutadapt.tests'

    # This test is really just to make sure that the command runs - the
    # detailed tests in the Util Tests below ensure the commands are crafted
    # appropriately.
    def test_typical(self):
        demuxed_art = Artifact.import_data(
            'SampleData[PairedEndSequencesWithQuality]',
            self.get_data_path('paired-end'))
        adapter = ['TACGGAGGATCC']
        with redirected_stdio(stdout=os.devnull):
            # The forward and reverse reads are identical in these data
            obs_art, = self.plugin.methods['trim_paired'](demuxed_art,
                                                          front_f=adapter,
                                                          front_r=adapter)
        demuxed = demuxed_art.view(SingleLanePerSampleSingleEndFastqDirFmt)
        demuxed_seqs = demuxed.sequences.iter_views(FastqGzFormat)
        obs = obs_art.view(SingleLanePerSampleSingleEndFastqDirFmt)
        obs_seqs = obs.sequences.iter_views(FastqGzFormat)
        # Iterate over each sample, side-by-side
        for (_, exp_fp), (_, obs_fp) in zip(demuxed_seqs, obs_seqs):
            exp_fh = gzip.open(str(exp_fp), 'rt')
            obs_fh = gzip.open(str(obs_fp), 'rt')
            # Iterate over expected and observed reads, side-by-side.
            # Each iteration consumes one 4-line FASTQ record from each file.
            for records in itertools.zip_longest(*[exp_fh] * 4, *[obs_fh] * 4):
                (exp_seq_h, exp_seq, _, exp_qual,
                 obs_seq_h, obs_seq, _, obs_qual) = records
                # Make sure cutadapt hasn't shuffled the read order
                self.assertEqual(exp_seq_h, obs_seq_h)
                self.assertTrue(obs_seq in exp_seq)
                # The adapter should not be present in the trimmed seqs
                self.assertTrue('TACGGAGGATCC' not in obs_seq)
                self.assertTrue(obs_qual in exp_qual)
                # Make sure cutadapt trimmed the quality scores, too
                self.assertEqual(len(obs_seq), len(obs_qual))
            exp_fh.close(), obs_fh.close()

    def test_unordered(self):
        # Uses distinct adapters per direction so we can tell, per file,
        # which direction cutadapt treated it as.
        demuxed_art = Artifact.import_data(
            'SampleData[PairedEndSequencesWithQuality]',
            self.get_data_path('paired-end-unordered'))
        with redirected_stdio(stdout=os.devnull):
            # The forward and reverse reads are identical in these data
            obs_art, = self.plugin.methods['trim_paired'](demuxed_art,
                                                          front_f=['TTTT'],
                                                          front_r=['AAAA'])
        demuxed = demuxed_art.view(SingleLanePerSampleSingleEndFastqDirFmt)
        demuxed_seqs = demuxed.sequences.iter_views(FastqGzFormat)
        obs = obs_art.view(SingleLanePerSampleSingleEndFastqDirFmt)
        obs_seqs = obs.sequences.iter_views(FastqGzFormat)
        # Iterate over each sample, side-by-side
        for (_, exp_fp), (_, obs_fp) in zip(demuxed_seqs, obs_seqs):
            exp_fh = gzip.open(str(exp_fp), 'rt')
            obs_fh = gzip.open(str(obs_fp), 'rt')
            # Iterate over expected and observed reads, side-by-side
            for records in itertools.zip_longest(*[exp_fh] * 4, *[obs_fh] * 4):
                (exp_seq_h, exp_seq, _, exp_qual,
                 obs_seq_h, obs_seq, _, obs_qual) = records
                # The adapter should not be present in the trimmed seqs
                if 'R1_001.fastq' in str(obs_fp):
                    self.assertNotIn('TTTT', obs_seq)
                else:
                    self.assertNotIn('AAAA', obs_seq)
                self.assertTrue(obs_qual in exp_qual)
                # Make sure cutadapt trimmed the quality scores, too
                self.assertEqual(len(obs_seq), len(obs_qual))
            exp_fh.close(), obs_fh.close()
class TestTrimUtilsSingle(TestPluginBase):
    """Unit tests for ``_build_trim_command`` in single-end mode.

    Checks that each keyword argument is translated into the expected
    cutadapt command-line flag (joined into a single string for easy
    substring assertions).
    """

    package = 'q2_cutadapt.tests'

    def setUp(self):
        super().setUp()
        # Read-only input fixture and a fresh output directory format.
        self.demux_seqs = SingleLanePerSampleSingleEndFastqDirFmt(
            self.get_data_path('single-end'), mode='r')
        self.trimmed_seqs = CasavaOneEightSingleLanePerSampleDirFmt()

    def test_build_trim_command_typical(self):
        df = self.demux_seqs.manifest.view(pd.DataFrame)
        for _, fwd in df.itertuples():
            # rev=None selects the single-end command shape.
            obs = _build_trim_command(fwd, None,
                                      self.trimmed_seqs,
                                      cores=0,
                                      adapter_f=['AAAA'],
                                      front_f=['GGGG'],
                                      anywhere_f=['CCCC'],
                                      error_rate=2,
                                      indels=False,
                                      times=3,
                                      overlap=4,
                                      match_read_wildcards=True,
                                      match_adapter_wildcards=False,
                                      minimum_length=2,
                                      discard_untrimmed=True)
            obs = ' '.join(obs)
            self.assertTrue('-o %s' % str(self.trimmed_seqs.path / fwd[0])
                            in obs)
            self.assertTrue('--cores 0' in obs)
            self.assertTrue('--adapter AAAA' in obs)
            self.assertTrue('--front GGGG' in obs)
            self.assertTrue('--anywhere CCCC' in obs)
            self.assertTrue('--error-rate 2' in obs)
            self.assertTrue('--times 3' in obs)
            self.assertTrue('--overlap 4' in obs)
            self.assertTrue('--no-indels' in obs)
            self.assertTrue('--match-read-wildcards' in obs)
            self.assertTrue('--no-match-adapter-wildcards' in obs)
            self.assertTrue('--minimum-length 2' in obs)
            self.assertTrue('--discard-untrimmed' in obs)
            self.assertTrue(str(self.demux_seqs) in obs)

    def test_build_trim_command_multiple_adapters(self):
        # Each adapter in the list should produce its own --adapter flag.
        df = self.demux_seqs.manifest.view(pd.DataFrame)
        for _, fwd in df.itertuples():
            obs = _build_trim_command(fwd, None,
                                      self.trimmed_seqs,
                                      adapter_f=['AAAA', 'GGGG', 'CCCC'])
            obs = ' '.join(obs)
            self.assertTrue('--adapter AAAA' in obs)
            self.assertTrue('--adapter GGGG' in obs)
            self.assertTrue('--adapter CCCC' in obs)
            self.assertTrue('--front' not in obs)
            self.assertTrue('--anywhere' not in obs)
    def test_build_trim_command_no_adapters_or_flags(self):
        # Defaults: no adapter flags at all, and minimum-length falls back to 1.
        df = self.demux_seqs.manifest.view(pd.DataFrame)
        for _, fwd in df.itertuples():
            obs = _build_trim_command(fwd, None,
                                      self.trimmed_seqs)
            obs = ' '.join(obs)
            self.assertTrue('--adapter' not in obs)
            self.assertTrue('--front' not in obs)
            self.assertTrue('--anywhere' not in obs)
            self.assertTrue('--no-indels' not in obs)
            self.assertTrue('--match-read-wildcards' not in obs)
            self.assertTrue('--no-match-adapter-wildcards' not in obs)
            self.assertTrue('--minimum-length 1' in obs)
            self.assertTrue('--discard-untrimmed' not in obs)
class TestTrimUtilsPaired(TestPluginBase):
    """Unit tests for ``_build_trim_command`` in paired-end mode.

    Forward-read options map to the long flags (--adapter/--front/--anywhere);
    reverse-read options map to cutadapt's short paired flags (-A/-G/-B).
    """

    package = 'q2_cutadapt.tests'

    def setUp(self):
        super().setUp()
        # Read-only paired-end fixture and a fresh output directory format.
        self.demux_seqs = SingleLanePerSamplePairedEndFastqDirFmt(
            self.get_data_path('paired-end'), mode='r')
        self.trimmed_seqs = CasavaOneEightSingleLanePerSampleDirFmt()

    def test_build_trim_command_typical(self):
        df = self.demux_seqs.manifest.view(pd.DataFrame)
        for _, fwd, rev in df.itertuples():
            obs = _build_trim_command(fwd, rev,
                                      self.trimmed_seqs,
                                      cores=0,
                                      adapter_f=['AAAA'],
                                      front_f=['GGGG'],
                                      anywhere_f=['CCCC'],
                                      adapter_r=['TTTT'],
                                      front_r=['CCCC'],
                                      anywhere_r=['GGGG'],
                                      error_rate=2,
                                      indels=False,
                                      times=3,
                                      overlap=4,
                                      match_read_wildcards=True,
                                      match_adapter_wildcards=False,
                                      minimum_length=2,
                                      discard_untrimmed=True)
            obs = ' '.join(obs)
            # -o is the forward output, -p the reverse (paired) output.
            self.assertTrue('-o %s' % str(self.trimmed_seqs.path / fwd[0])
                            in obs)
            self.assertTrue('-p %s' % str(self.trimmed_seqs.path / rev[0])
                            in obs)
            self.assertTrue('--cores 0' in obs)
            self.assertTrue('--adapter AAAA' in obs)
            self.assertTrue('--front GGGG' in obs)
            self.assertTrue('--anywhere CCCC' in obs)
            self.assertTrue('-A TTTT' in obs)
            self.assertTrue('-G CCCC' in obs)
            self.assertTrue('-B GGGG' in obs)
            self.assertTrue('--error-rate 2' in obs)
            self.assertTrue('--times 3' in obs)
            self.assertTrue('--overlap 4' in obs)
            self.assertTrue('--no-indels' in obs)
            self.assertTrue('--match-read-wildcards' in obs)
            self.assertTrue('--no-match-adapter-wildcards' in obs)
            self.assertTrue('--minimum-length 2' in obs)
            self.assertTrue('--discard-untrimmed' in obs)
            self.assertTrue(str(self.demux_seqs) in obs)

    def test_build_trim_command_multiple_adapters(self):
        # Each adapter in either list gets its own flag occurrence.
        df = self.demux_seqs.manifest.view(pd.DataFrame)
        for _, fwd, rev in df.itertuples():
            obs = _build_trim_command(fwd, rev, self.trimmed_seqs,
                                      adapter_f=['AAAA', 'GGGG', 'CCCC'],
                                      adapter_r=['TTTT', 'CCCC', 'GGGG'])
            obs = ' '.join(obs)
            self.assertTrue('--adapter AAAA' in obs)
            self.assertTrue('--adapter GGGG' in obs)
            self.assertTrue('--adapter CCCC' in obs)
            self.assertTrue('-A TTTT' in obs)
            self.assertTrue('-A CCCC' in obs)
            self.assertTrue('-A GGGG' in obs)
            self.assertTrue('--front' not in obs)
            self.assertTrue('--anywhere' not in obs)
            self.assertTrue('-G' not in obs)
            self.assertTrue('-B' not in obs)
            self.assertTrue('--discard-trimmed' not in obs)
# Allow running this test module directly (e.g. `python test_trim.py`).
if __name__ == '__main__':
    unittest.main()
| 47.542484
| 79
| 0.551622
| 1,570
| 14,548
| 4.924841
| 0.13949
| 0.115882
| 0.123125
| 0.125323
| 0.85269
| 0.82954
| 0.801216
| 0.793456
| 0.78285
| 0.777936
| 0
| 0.009823
| 0.342246
| 14,548
| 305
| 80
| 47.698361
| 0.798203
| 0.108743
| 0
| 0.711297
| 0
| 0
| 0.095879
| 0.025593
| 0
| 0
| 0
| 0
| 0.297071
| 1
| 0.046025
| false
| 0
| 0.058577
| 0
| 0.138075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9cb9590b1299755c2fa42b34b98c947911ebc335
| 299
|
py
|
Python
|
metaworld/envs/__init__.py
|
yiwc/robotics-world
|
48efda3a8ea6741b35828b02860f45753252e376
|
[
"MIT"
] | 681
|
2019-09-09T19:34:37.000Z
|
2022-03-31T12:17:58.000Z
|
metaworld/envs/__init__.py
|
yiwc/robotics-world
|
48efda3a8ea6741b35828b02860f45753252e376
|
[
"MIT"
] | 212
|
2019-09-18T14:43:44.000Z
|
2022-03-27T22:21:00.000Z
|
metaworld/envs/__init__.py
|
yiwc/robotics-world
|
48efda3a8ea6741b35828b02860f45753252e376
|
[
"MIT"
] | 157
|
2019-09-12T05:06:05.000Z
|
2022-03-29T14:47:24.000Z
|
from metaworld.envs.mujoco.env_dict import (ALL_V2_ENVIRONMENTS_GOAL_HIDDEN,
                                            ALL_V2_ENVIRONMENTS_GOAL_OBSERVABLE
                                            )

# Public API of this package: the two V2 environment registries
# (goal-hidden and goal-observable variants).
__all__ = ['ALL_V2_ENVIRONMENTS_GOAL_HIDDEN',
           'ALL_V2_ENVIRONMENTS_GOAL_OBSERVABLE']
| 49.833333
| 79
| 0.588629
| 28
| 299
| 5.535714
| 0.464286
| 0.129032
| 0.43871
| 0.541935
| 0.748387
| 0.748387
| 0.748387
| 0.748387
| 0.748387
| 0.748387
| 0
| 0.021053
| 0.364548
| 299
| 6
| 80
| 49.833333
| 0.794737
| 0
| 0
| 0
| 0
| 0
| 0.22
| 0.22
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
9cc99b216084ca125659851051b18ddd5e72d2b0
| 5,500
|
py
|
Python
|
control_system/data/cellDatabase.py
|
bshrram/Graduation-Project---Omnidirectional-Conveyor-Table
|
6414fbcb3d53f3c3351c25ac8b48aa73397c250d
|
[
"MIT"
] | 1
|
2020-09-24T05:06:17.000Z
|
2020-09-24T05:06:17.000Z
|
control_system/data/cellDatabase.py
|
bshrram/Graduation-Project---Omnidirectional-Conveyor-Table
|
6414fbcb3d53f3c3351c25ac8b48aa73397c250d
|
[
"MIT"
] | null | null | null |
control_system/data/cellDatabase.py
|
bshrram/Graduation-Project---Omnidirectional-Conveyor-Table
|
6414fbcb3d53f3c3351c25ac8b48aa73397c250d
|
[
"MIT"
] | 1
|
2020-12-13T13:31:08.000Z
|
2020-12-13T13:31:08.000Z
|
# Cell ids for the 4 x 5 grid, row-major: ids[i][j] is assigned to the cell
# at grid row i, column j by the loop further down this file.
# NOTE(review): presumably these match physical labels on the conveyor
# modules — confirm against the hardware documentation.
ids = [
    [11, 81, 32, 52, 13],
    [31, 71, 42, 62, 23],
    [21, 61, 12, 72, 33],
    [44, 51, 22, 82, 43]
]
def _cell(code, motor_specs):
    """Build one fresh cell record: a code plus three motor pin mappings.

    A new dict (and new nested dicts/lists) is returned on every call —
    required because later code assigns per-entry keys ('id', 'location',
    'coordinates') and duplicate cells must not alias each other.
    """
    return {'code': code,
            'motors': [{'pins': {'digital': digital, 'pwm': pwm}}
                       for digital, pwm in motor_specs]}


# The eight distinct cell wirings, keyed by cell number (cell_1 .. cell_8).
# Each motor spec is ((digital pin pair), pwm pin).
# NOTE(review): (47, 38) is a non-adjacent pin pair unlike all the others —
# possibly a typo in the original wiring table, preserved as-is.
_CELL_SPECS = {
    1: (1, [((53, 52), 11), ((51, 50), 12), ((49, 48), 13)]),
    2: (1, [((47, 38), 10), ((37, 36), 9), ((35, 34), 8)]),
    3: (2, [((33, 32), 2), ((31, 30), 3), ((29, 28), 4)]),
    4: (2, [((27, 26), 5), ((25, 24), 6), ((23, 22), 7)]),
    5: (3, [((53, 52), 8), ((51, 50), 9), ((49, 48), 10)]),
    6: (3, [((47, 38), 11), ((37, 36), 12), ((35, 34), 13)]),
    7: (4, [((23, 22), 4), ((25, 24), 3), ((27, 26), 2)]),
    8: (4, [((29, 28), 5), ((31, 30), 6), ((33, 32), 7)]),
}

# Which physical cell sits at each of the 20 table positions, in the order
# the loops below walk them (replaces the hand-duplicated literal entries).
_LAYOUT = [1, 8, 3, 5, 1,
           3, 7, 4, 6, 2,
           2, 6, 1, 7, 3,
           4, 5, 2, 8, 4]

cellDatabase = [_cell(*_CELL_SPECS[cell_number]) for cell_number in _LAYOUT]
# Attach an id and a grid location to every cell record, walking the 4 x 5
# grid row-major. Odd rows are offset: their column index is shifted by one
# (columns land on 1, 3, 5, ... instead of 0, 2, 4, ...).
k = 0
for row in range(4):
    for col in range(5):
        column_index = col * 2 + (row % 2)
        cellDatabase[k]['id'] = ids[row][col]
        cellDatabase[k]['location'] = [row, column_index]
        k += 1
# Assign physical (x, y) coordinates to every cell, row by row. Odd rows
# start half a cell pitch (dx / 2) further right — a staggered layout.
# NOTE(review): units are presumably millimetres — confirm against the
# mechanical drawings.
y0 = 163.75
x0 = 165.8
dy = 157.5
dx = 90.93 * 2
r = 0
for row in range(4):
    # rows 1 and 3 (i.e. the odd rows) get the half-pitch offset
    x0 = 165.8 + (dx / 2 if row % 2 == 1 else 0)
    for _ in range(5):
        cellDatabase[r]['coordinates'] = [x0, y0]
        x0 = x0 + dx
        r += 1
    y0 = y0 + dy
| 24.886878
| 55
| 0.327455
| 574
| 5,500
| 3.102787
| 0.139373
| 0.370578
| 0.190904
| 0.050533
| 0.859068
| 0.845592
| 0.830994
| 0.830994
| 0.830994
| 0.830994
| 0
| 0.13848
| 0.406545
| 5,500
| 220
| 56
| 25
| 0.407169
| 0.032182
| 0
| 0.552083
| 0
| 0
| 0.200226
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
143ee29a245304ad09eb6ebb0377a7a9cc6d49b6
| 2,129
|
py
|
Python
|
plantcv/plantcv/spectral_index/__init__.py
|
YuanYu-Double/plantcv
|
0e11c7f63d96a52487e01e3b67744aa8697eedb2
|
[
"MIT"
] | 1
|
2022-03-12T05:46:03.000Z
|
2022-03-12T05:46:03.000Z
|
plantcv/plantcv/spectral_index/__init__.py
|
YuanYu-Double/plantcv
|
0e11c7f63d96a52487e01e3b67744aa8697eedb2
|
[
"MIT"
] | null | null | null |
plantcv/plantcv/spectral_index/__init__.py
|
YuanYu-Double/plantcv
|
0e11c7f63d96a52487e01e3b67744aa8697eedb2
|
[
"MIT"
] | null | null | null |
# Re-export every spectral index function at the package level so callers
# can write e.g. `from plantcv.plantcv.spectral_index import ndvi`.
from plantcv.plantcv.spectral_index.spectral_index import (
    ndvi, gdvi, savi, pri, ari, ci_rededge, cri550, cri700, egi, evi,
    mari, mcari, mtci, ndre, psnd_chla, psnd_chlb, psnd_car, psri,
    pssr_chla, pssr_chlb, pssr_car, rgri, rvsi, sipi, sr, vari,
    vi_green, wi,
)

# add new functions to end of lists
__all__ = ["ndvi", "gdvi", "savi", "pri", "ari", "ci_rededge", "cri550", "cri700", "egi", "evi", "mari", "mcari",
           "mtci", "ndre", "psnd_chla", "psnd_chlb", "psnd_car", "psri", "pssr_chla", "pssr_chlb", "pssr_car", "rgri", "rvsi",
           "sipi", "sr", "vari", "vi_green", "wi"]
| 60.828571
| 126
| 0.837952
| 304
| 2,129
| 5.618421
| 0.138158
| 0.42623
| 0.295082
| 0.42623
| 0.846604
| 0.846604
| 0.846604
| 0.846604
| 0.261124
| 0
| 0
| 0.006138
| 0.081729
| 2,129
| 34
| 127
| 62.617647
| 0.867519
| 0.0155
| 0
| 0
| 0
| 0
| 0.070201
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.903226
| 0
| 0.903226
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
14892c1a03eca18eae53c76475c76262d830ad6a
| 13,566
|
py
|
Python
|
sdk/keyvault/azure-keyvault/azure/keyvault/v7_3_preview/operations/hsm_security_domain_operations.py
|
mccoyp/azure-keyvault-7.3-preview
|
da351753a9d3d2bf97c27566865cd88bae7faa55
|
[
"MIT"
] | null | null | null |
sdk/keyvault/azure-keyvault/azure/keyvault/v7_3_preview/operations/hsm_security_domain_operations.py
|
mccoyp/azure-keyvault-7.3-preview
|
da351753a9d3d2bf97c27566865cd88bae7faa55
|
[
"MIT"
] | null | null | null |
sdk/keyvault/azure-keyvault/azure/keyvault/v7_3_preview/operations/hsm_security_domain_operations.py
|
mccoyp/azure-keyvault-7.3-preview
|
da351753a9d3d2bf97c27566865cd88bae7faa55
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class HSMSecurityDomainOperations(object):
"""HSMSecurityDomainOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "7.3-preview".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
    # AutoRest-generated constructor: stores the service client, its
    # configuration, and the msrest (de)serializers used by every
    # operation below.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self.config = config

    # Service API version sent on versioned requests.
    self.api_version = "7.3-preview"
def download_pending(
        self, vault_base_url, custom_headers=None, raw=False, **operation_config):
    """Retrieves the Security Domain download operation status.

    :param vault_base_url: The vault name, for example
     https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: SecurityDomainOperationStatus or ClientRawResponse if
     raw=true
    :rtype: ~securitydomain.models.SecurityDomainOperationStatus or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`KeyVaultErrorException<securitydomain.models.KeyVaultErrorException>`
    """
    # Construct URL
    url = self.download_pending.metadata['url']
    path_format_arguments = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    # FIX: unlike the sibling operations (download, transfer_key_method),
    # this method omitted the api-version query parameter; Key Vault
    # endpoints are versioned, so send it here too for consistency.
    # NOTE(review): this file is AutoRest-generated ("changes may be
    # lost") — if the omission was intentional in the swagger, fix the
    # swagger instead and regenerate.
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    if custom_headers:
        header_parameters.update(custom_headers)

    # Construct and send request
    request = self._client.get(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.KeyVaultErrorException(self._deserialize, response)

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('SecurityDomainOperationStatus', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
download_pending.metadata = {'url': '/securitydomain/download/pending'}
def download(
        self, vault_base_url, certificate_info_object, custom_headers=None, raw=False, **operation_config):
    """Retrieves the Security Domain from the managed HSM. Calling this
    endpoint can be used to activate a provisioned managed HSM resource.

    :param vault_base_url: The vault name, for example
     https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param certificate_info_object: The Security Domain download operation
     requires customer to provide N certificates (minimum 3 and maximum 10)
     containing a public key in JWK format.
    :type certificate_info_object:
     ~securitydomain.models.CertificateInfoObject
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: SecurityDomainObject or ClientRawResponse if raw=true
    :rtype: ~securitydomain.models.SecurityDomainObject or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`KeyVaultErrorException<securitydomain.models.KeyVaultErrorException>`
    """
    # Construct URL
    url = self.download.metadata['url']
    path_format_arguments = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if custom_headers:
        header_parameters.update(custom_headers)

    # Construct body
    body_content = self._serialize.body(certificate_info_object, 'CertificateInfoObject')

    # Construct and send request
    request = self._client.post(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    # 202 Accepted: the download runs as a long-running operation.
    if response.status_code not in [202]:
        raise models.KeyVaultErrorException(self._deserialize, response)

    deserialized = None
    header_dict = {}

    if response.status_code == 202:
        deserialized = self._deserialize('SecurityDomainObject', response)
        # Response headers used to poll the async operation.
        header_dict = {
            'Retry-After': 'int',
            'Azure-AsyncOperation': 'str',
        }

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        client_raw_response.add_headers(header_dict)
        return client_raw_response

    return deserialized
download.metadata = {'url': '/securitydomain/download'}
def transfer_key_method(
        self, vault_base_url, custom_headers=None, raw=False, **operation_config):
    """Retrieve Security Domain transfer key.

    :param vault_base_url: The vault name, for example
     https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: TransferKey or ClientRawResponse if raw=true
    :rtype: ~securitydomain.models.TransferKey or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`KeyVaultErrorException<securitydomain.models.KeyVaultErrorException>`
    """
    # Resolve the endpoint; the vault address is substituted verbatim
    # (skip_quote) because it is itself a URL.
    path_args = {
        'vaultBaseUrl': self._serialize.url(
            "vault_base_url", vault_base_url, 'str', skip_quote=True),
    }
    url = self._client.format_url(
        self.transfer_key_method.metadata['url'], **path_args)

    # The only query parameter is the service API version.
    query = {
        'api-version': self._serialize.query(
            "self.api_version", self.api_version, 'str'),
    }

    # Default headers; caller-supplied headers take precedence.
    headers = {'Accept': 'application/json'}
    if custom_headers:
        headers.update(custom_headers)

    # Issue the GET and validate the outcome.
    request = self._client.get(url, query, headers)
    response = self._client.send(request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.KeyVaultErrorException(self._deserialize, response)

    result = self._deserialize('TransferKey', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
transfer_key_method.metadata = {'url': '/securitydomain/upload'}
def upload(
        self, vault_base_url, security_domain, custom_headers=None, raw=False, **operation_config):
    """Restore the provided Security Domain.

    :param vault_base_url: The vault name, for example
     https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param security_domain: The Security Domain to be restored.
    :type security_domain: ~securitydomain.models.SecurityDomainObject
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: SecurityDomainOperationStatus or ClientRawResponse if
     raw=true
    :rtype: ~securitydomain.models.SecurityDomainOperationStatus or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`KeyVaultErrorException<securitydomain.models.KeyVaultErrorException>`
    """
    # Resolve the endpoint; the vault address is substituted verbatim
    # (skip_quote) because it is itself a URL.
    path_args = {
        'vaultBaseUrl': self._serialize.url(
            "vault_base_url", vault_base_url, 'str', skip_quote=True),
    }
    url = self._client.format_url(self.upload.metadata['url'], **path_args)

    # This operation sends no query parameters.
    query = {}

    # JSON in, JSON out; caller-supplied headers take precedence.
    headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/json; charset=utf-8',
    }
    if custom_headers:
        headers.update(custom_headers)

    # Serialize the Security Domain payload and POST it.
    body = self._serialize.body(security_domain, 'SecurityDomainObject')
    request = self._client.post(url, query, headers, body)
    response = self._client.send(request, stream=False, **operation_config)

    # 202 = accepted (long-running operation), 204 = no content.
    if response.status_code not in (202, 204):
        raise models.KeyVaultErrorException(self._deserialize, response)

    result = None
    async_headers = {}
    if response.status_code == 202:
        result = self._deserialize('SecurityDomainOperationStatus', response)
        # Polling headers surfaced to raw callers.
        async_headers = {
            'Retry-After': 'int',
            'Azure-AsyncOperation': 'str',
        }
    if raw:
        wrapped = ClientRawResponse(result, response)
        wrapped.add_headers(async_headers)
        return wrapped
    return result
upload.metadata = {'url': '/securitydomain/upload'}
def upload_pending(
        self, vault_base_url, custom_headers=None, raw=False, **operation_config):
    """Get Security Domain upload operation status.

    :param vault_base_url: The vault name, for example
     https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: SecurityDomainOperationStatus or ClientRawResponse if
     raw=true
    :rtype: ~securitydomain.models.SecurityDomainOperationStatus or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`KeyVaultErrorException<securitydomain.models.KeyVaultErrorException>`
    """
    # Resolve the endpoint; the vault address is substituted verbatim
    # (skip_quote) because it is itself a URL.
    path_args = {
        'vaultBaseUrl': self._serialize.url(
            "vault_base_url", vault_base_url, 'str', skip_quote=True),
    }
    url = self._client.format_url(
        self.upload_pending.metadata['url'], **path_args)

    # This operation takes no query parameters.
    query = {}

    # Default headers; caller-supplied headers take precedence.
    headers = {'Accept': 'application/json'}
    if custom_headers:
        headers.update(custom_headers)

    # Issue the GET and validate the outcome.
    request = self._client.get(url, query, headers)
    response = self._client.send(request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.KeyVaultErrorException(self._deserialize, response)

    result = self._deserialize('SecurityDomainOperationStatus', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
upload_pending.metadata = {'url': '/securitydomain/upload/pending'}
| 40.495522
| 111
| 0.666593
| 1,365
| 13,566
| 6.430037
| 0.136264
| 0.025635
| 0.03418
| 0.01709
| 0.817478
| 0.805514
| 0.7964
| 0.788652
| 0.776575
| 0.776575
| 0
| 0.004179
| 0.241486
| 13,566
| 334
| 112
| 40.616766
| 0.848785
| 0.363998
| 0
| 0.72973
| 0
| 0
| 0.102054
| 0.030172
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040541
| false
| 0
| 0.013514
| 0
| 0.135135
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2124839349bb11116bf3b820decbe1b4112371f7
| 1,612
|
py
|
Python
|
settings/channel_archiver/NIH.TIMING_settings.py
|
bopopescu/Lauecollect
|
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
|
[
"MIT"
] | null | null | null |
settings/channel_archiver/NIH.TIMING_settings.py
|
bopopescu/Lauecollect
|
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
|
[
"MIT"
] | 1
|
2019-10-22T21:28:31.000Z
|
2019-10-22T21:39:12.000Z
|
settings/channel_archiver/NIH.TIMING_settings.py
|
bopopescu/Lauecollect
|
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
|
[
"MIT"
] | 2
|
2019-06-06T15:06:46.000Z
|
2020-07-20T02:03:22.000Z
|
# Channel-archiver destinations for the NIH.TIMING instrument: each
# assignment maps a timing-register counter to the text file its
# archived values are written to.
# NOTE(review): the path prefixes are inconsistent ('//mx340hs' vs
# '/net/mx340hs') and the run directories vary (anfinrud_1901 / _1903 /
# _1906) -- presumably these were updated per beamtime; confirm which
# paths are current before relying on them.
registers.xosct_trig_count.count.filename = '//mx340hs/data/anfinrud_1903/Archive/NIH.TIMING.registers.xosct_trig_count.count.txt'
registers.xosct_acq_count.count.filename = '//mx340hs/data/anfinrud_1903/Archive/NIH.TIMING.registers.xosct_acq_count.count.txt'
registers.xdet_acq_count.count.filename = '//mx340hs/data/anfinrud_1903/Archive/NIH.TIMING.registers.xdet_acq_count.count.txt'
registers.xdet_trig_count.count.filename = '//mx340hs/data/anfinrud_1903/Archive/NIH.TIMING.registers.xdet_trig_count.count.txt'
registers.xdet_state.count.filename = '/net/mx340hs/data/anfinrud_1901/Archive/NIH.TIMING.registers.xdet_state.count.txt'
registers.ch7_state.count.filename = '//mx340hs/data/anfinrud_1906/Archive/NIH.TIMING.registers.ch7_state.count.txt'
registers.acquiring.count.filename = '//mx340hs/data/anfinrud_1906/Archive/NIH.TIMING.registers.acquiring.count.txt'
registers.image_number.filename = '/net/mx340hs/data/anfinrud_1901/Archive/NIH.TIMING.registers.image_number.txt'
registers.image_number.count.filename = '//mx340hs/data/anfinrud_1906/Archive/NIH.TIMING.registers.image_number.count.txt'
registers.ch1_trig_count.count.filename = '//mx340hs/data/anfinrud_1906/Archive/NIH.TIMING.registers.ch1_trig_count.count.txt'
registers.ch1_acq_count.count.filename = '//mx340hs/data/anfinrud_1906/Archive/NIH.TIMING.registers.ch1_acq_count.count.txt'
registers.ch7_trig_count.count.filename = '//mx340hs/data/anfinrud_1906/Archive/NIH.TIMING.registers.ch7_trig_count.count.txt'
registers.ch7_acq_count.count.filename = '//mx340hs/data/anfinrud_1906/Archive/NIH.TIMING.registers.ch7_acq_count.count.txt'
| 124
| 130
| 0.840571
| 235
| 1,612
| 5.540426
| 0.102128
| 0.122888
| 0.189708
| 0.249616
| 0.890937
| 0.745008
| 0.683564
| 0.683564
| 0.683564
| 0.683564
| 0
| 0.064168
| 0.023573
| 1,612
| 13
| 131
| 124
| 0.763024
| 0
| 0
| 0
| 0
| 0.076923
| 0.650961
| 0.650961
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
21557af80079024deacd9b6d46570b08667f7a30
| 16,896
|
py
|
Python
|
sdk/python/pulumi_akamai/network_list_activations.py
|
pulumi/pulumi-akamai
|
85f933ccf2f61738b3074a13fa718132280f8364
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2021-01-21T15:22:12.000Z
|
2021-08-25T14:15:29.000Z
|
sdk/python/pulumi_akamai/network_list_activations.py
|
pulumi/pulumi-akamai
|
85f933ccf2f61738b3074a13fa718132280f8364
|
[
"ECL-2.0",
"Apache-2.0"
] | 59
|
2020-08-13T14:39:36.000Z
|
2022-03-31T15:19:48.000Z
|
sdk/python/pulumi_akamai/network_list_activations.py
|
pulumi/pulumi-akamai
|
85f933ccf2f61738b3074a13fa718132280f8364
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['NetworkListActivationsArgs', 'NetworkListActivations']
@pulumi.input_type
class NetworkListActivationsArgs:
    """Typed argument bag for creating a NetworkListActivations resource.

    Generated by the Pulumi Terraform Bridge; values are stored and read
    through pulumi.set/pulumi.get rather than plain attributes.
    """

    def __init__(__self__, *,
                 network_list_id: pulumi.Input[str],
                 notification_emails: pulumi.Input[Sequence[pulumi.Input[str]]],
                 activate: Optional[pulumi.Input[bool]] = None,
                 network: Optional[pulumi.Input[str]] = None,
                 notes: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a NetworkListActivations resource.
        :param pulumi.Input[str] network_list_id: The ID of the network list to be activated
        :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_emails: A bracketed, comma-separated list of email addresses that will be notified when the
               operation is complete.
        :param pulumi.Input[str] network: The network to be used, either `STAGING` or `PRODUCTION`. If not supplied, defaults to
               `STAGING`.
        :param pulumi.Input[str] notes: A comment describing the activation.
        """
        # Required inputs are always recorded; optional inputs only when
        # supplied, so unset values stay absent instead of explicit None.
        pulumi.set(__self__, "network_list_id", network_list_id)
        pulumi.set(__self__, "notification_emails", notification_emails)
        if activate is not None:
            pulumi.set(__self__, "activate", activate)
        if network is not None:
            pulumi.set(__self__, "network", network)
        if notes is not None:
            pulumi.set(__self__, "notes", notes)

    # The getter's `name=` argument maps the snake_case Python property to
    # the camelCase wire name used by the provider.
    @property
    @pulumi.getter(name="networkListId")
    def network_list_id(self) -> pulumi.Input[str]:
        """
        The ID of the network list to be activated
        """
        return pulumi.get(self, "network_list_id")

    @network_list_id.setter
    def network_list_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "network_list_id", value)

    @property
    @pulumi.getter(name="notificationEmails")
    def notification_emails(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        A bracketed, comma-separated list of email addresses that will be notified when the
        operation is complete.
        """
        return pulumi.get(self, "notification_emails")

    @notification_emails.setter
    def notification_emails(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "notification_emails", value)

    # NOTE(review): `activate` has no upstream description in this file;
    # presumably it toggles activation -- confirm against provider docs.
    @property
    @pulumi.getter
    def activate(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "activate")

    @activate.setter
    def activate(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "activate", value)

    @property
    @pulumi.getter
    def network(self) -> Optional[pulumi.Input[str]]:
        """
        The network to be used, either `STAGING` or `PRODUCTION`. If not supplied, defaults to
        `STAGING`.
        """
        return pulumi.get(self, "network")

    @network.setter
    def network(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "network", value)

    @property
    @pulumi.getter
    def notes(self) -> Optional[pulumi.Input[str]]:
        """
        A comment describing the activation.
        """
        return pulumi.get(self, "notes")

    @notes.setter
    def notes(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "notes", value)
@pulumi.input_type
class _NetworkListActivationsState:
    """Lookup/filter state for existing NetworkListActivations resources.

    All fields are optional; unlike the Args class this also carries the
    provider-reported `status` output.
    """

    def __init__(__self__, *,
                 activate: Optional[pulumi.Input[bool]] = None,
                 network: Optional[pulumi.Input[str]] = None,
                 network_list_id: Optional[pulumi.Input[str]] = None,
                 notes: Optional[pulumi.Input[str]] = None,
                 notification_emails: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 status: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering NetworkListActivations resources.
        :param pulumi.Input[str] network: The network to be used, either `STAGING` or `PRODUCTION`. If not supplied, defaults to
               `STAGING`.
        :param pulumi.Input[str] network_list_id: The ID of the network list to be activated
        :param pulumi.Input[str] notes: A comment describing the activation.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_emails: A bracketed, comma-separated list of email addresses that will be notified when the
               operation is complete.
        :param pulumi.Input[str] status: The string `ACTIVATED` if the activation was successful, or a string identifying the reason why the network
               list was not activated.
        """
        # Every field is optional: record only what the caller supplied.
        if activate is not None:
            pulumi.set(__self__, "activate", activate)
        if network is not None:
            pulumi.set(__self__, "network", network)
        if network_list_id is not None:
            pulumi.set(__self__, "network_list_id", network_list_id)
        if notes is not None:
            pulumi.set(__self__, "notes", notes)
        if notification_emails is not None:
            pulumi.set(__self__, "notification_emails", notification_emails)
        if status is not None:
            pulumi.set(__self__, "status", status)

    @property
    @pulumi.getter
    def activate(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "activate")

    @activate.setter
    def activate(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "activate", value)

    @property
    @pulumi.getter
    def network(self) -> Optional[pulumi.Input[str]]:
        """
        The network to be used, either `STAGING` or `PRODUCTION`. If not supplied, defaults to
        `STAGING`.
        """
        return pulumi.get(self, "network")

    @network.setter
    def network(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "network", value)

    # camelCase wire name mapped via the getter's `name=` argument.
    @property
    @pulumi.getter(name="networkListId")
    def network_list_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the network list to be activated
        """
        return pulumi.get(self, "network_list_id")

    @network_list_id.setter
    def network_list_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "network_list_id", value)

    @property
    @pulumi.getter
    def notes(self) -> Optional[pulumi.Input[str]]:
        """
        A comment describing the activation.
        """
        return pulumi.get(self, "notes")

    @notes.setter
    def notes(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "notes", value)

    @property
    @pulumi.getter(name="notificationEmails")
    def notification_emails(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A bracketed, comma-separated list of email addresses that will be notified when the
        operation is complete.
        """
        return pulumi.get(self, "notification_emails")

    @notification_emails.setter
    def notification_emails(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "notification_emails", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        The string `ACTIVATED` if the activation was successful, or a string identifying the reason why the network
        list was not activated.
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
class NetworkListActivations(pulumi.CustomResource):
    """Activates an Akamai network list in the STAGING or PRODUCTION network.

    Generated resource class: the real __init__ dispatches between the
    args-object overload and the keyword-argument overload, then defers
    to _internal_init.
    """

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 activate: Optional[pulumi.Input[bool]] = None,
                 network: Optional[pulumi.Input[str]] = None,
                 network_list_id: Optional[pulumi.Input[str]] = None,
                 notes: Optional[pulumi.Input[str]] = None,
                 notification_emails: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Use the `NetworkListActivations` resource to activate a network list in either the STAGING or PRODUCTION
        environment.

        ## Example Usage

        Basic usage:

        ```python
        import pulumi
        import pulumi_akamai as akamai

        network_lists_filter = akamai.get_network_lists(name=var["network_list"])
        activation = akamai.NetworkListActivations("activation",
            network_list_id=network_lists_filter.lists[0],
            network="STAGING",
            notes="TEST Notes",
            notification_emails=["user@example.com"])
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] network: The network to be used, either `STAGING` or `PRODUCTION`. If not supplied, defaults to
               `STAGING`.
        :param pulumi.Input[str] network_list_id: The ID of the network list to be activated
        :param pulumi.Input[str] notes: A comment describing the activation.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_emails: A bracketed, comma-separated list of email addresses that will be notified when the
               operation is complete.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: NetworkListActivationsArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Use the `NetworkListActivations` resource to activate a network list in either the STAGING or PRODUCTION
        environment.

        ## Example Usage

        Basic usage:

        ```python
        import pulumi
        import pulumi_akamai as akamai

        network_lists_filter = akamai.get_network_lists(name=var["network_list"])
        activation = akamai.NetworkListActivations("activation",
            network_list_id=network_lists_filter.lists[0],
            network="STAGING",
            notes="TEST Notes",
            notification_emails=["user@example.com"])
        ```

        :param str resource_name: The name of the resource.
        :param NetworkListActivationsArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Real constructor: decide which overload the caller used and
        # forward the arguments to _internal_init accordingly.
        resource_args, opts = _utilities.get_resource_args_opts(NetworkListActivationsArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 activate: Optional[pulumi.Input[bool]] = None,
                 network: Optional[pulumi.Input[str]] = None,
                 network_list_id: Optional[pulumi.Input[str]] = None,
                 notes: Optional[pulumi.Input[str]] = None,
                 notification_emails: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 __props__=None):
        # Normalize/validate resource options before registration.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the property bag here;
            # __props__ is only legal when adopting an existing resource.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = NetworkListActivationsArgs.__new__(NetworkListActivationsArgs)

            __props__.__dict__["activate"] = activate
            __props__.__dict__["network"] = network
            # Required unless adopting by URN.
            if network_list_id is None and not opts.urn:
                raise TypeError("Missing required property 'network_list_id'")
            __props__.__dict__["network_list_id"] = network_list_id
            __props__.__dict__["notes"] = notes
            if notification_emails is None and not opts.urn:
                raise TypeError("Missing required property 'notification_emails'")
            __props__.__dict__["notification_emails"] = notification_emails
            # `status` is an output; the provider fills it in.
            __props__.__dict__["status"] = None
        super(NetworkListActivations, __self__).__init__(
            'akamai:index/networkListActivations:NetworkListActivations',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            activate: Optional[pulumi.Input[bool]] = None,
            network: Optional[pulumi.Input[str]] = None,
            network_list_id: Optional[pulumi.Input[str]] = None,
            notes: Optional[pulumi.Input[str]] = None,
            notification_emails: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            status: Optional[pulumi.Input[str]] = None) -> 'NetworkListActivations':
        """
        Get an existing NetworkListActivations resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] network: The network to be used, either `STAGING` or `PRODUCTION`. If not supplied, defaults to
               `STAGING`.
        :param pulumi.Input[str] network_list_id: The ID of the network list to be activated
        :param pulumi.Input[str] notes: A comment describing the activation.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_emails: A bracketed, comma-separated list of email addresses that will be notified when the
               operation is complete.
        :param pulumi.Input[str] status: The string `ACTIVATED` if the activation was successful, or a string identifying the reason why the network
               list was not activated.
        """
        # Bind the provider ID into the options, then build a state bag
        # from whatever qualifiers the caller supplied.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _NetworkListActivationsState.__new__(_NetworkListActivationsState)

        __props__.__dict__["activate"] = activate
        __props__.__dict__["network"] = network
        __props__.__dict__["network_list_id"] = network_list_id
        __props__.__dict__["notes"] = notes
        __props__.__dict__["notification_emails"] = notification_emails
        __props__.__dict__["status"] = status
        return NetworkListActivations(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def activate(self) -> pulumi.Output[Optional[bool]]:
        return pulumi.get(self, "activate")

    @property
    @pulumi.getter
    def network(self) -> pulumi.Output[Optional[str]]:
        """
        The network to be used, either `STAGING` or `PRODUCTION`. If not supplied, defaults to
        `STAGING`.
        """
        return pulumi.get(self, "network")

    @property
    @pulumi.getter(name="networkListId")
    def network_list_id(self) -> pulumi.Output[str]:
        """
        The ID of the network list to be activated
        """
        return pulumi.get(self, "network_list_id")

    @property
    @pulumi.getter
    def notes(self) -> pulumi.Output[Optional[str]]:
        """
        A comment describing the activation.
        """
        return pulumi.get(self, "notes")

    @property
    @pulumi.getter(name="notificationEmails")
    def notification_emails(self) -> pulumi.Output[Sequence[str]]:
        """
        A bracketed, comma-separated list of email addresses that will be notified when the
        operation is complete.
        """
        return pulumi.get(self, "notification_emails")

    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        """
        The string `ACTIVATED` if the activation was successful, or a string identifying the reason why the network
        list was not activated.
        """
        return pulumi.get(self, "status")
| 41.718519
| 161
| 0.643466
| 1,907
| 16,896
| 5.489775
| 0.090194
| 0.088261
| 0.080237
| 0.05884
| 0.817939
| 0.794823
| 0.76483
| 0.752221
| 0.728627
| 0.705512
| 0
| 0.000239
| 0.256984
| 16,896
| 404
| 162
| 41.821782
| 0.833679
| 0.317235
| 0
| 0.674208
| 1
| 0
| 0.094044
| 0.014154
| 0
| 0
| 0
| 0
| 0
| 1
| 0.158371
| false
| 0.004525
| 0.022624
| 0.013575
| 0.276018
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dcee3fd4fe942bb77e56fc141a0abff4e800e432
| 19,183
|
py
|
Python
|
atro/atro_svm/algorithms.py
|
MasaKat0/ATRO
|
05c89440d026cbff5bb39a5eae87cd8f103c62c2
|
[
"MIT"
] | 1
|
2021-03-23T12:31:43.000Z
|
2021-03-23T12:31:43.000Z
|
atro/atro_svm/algorithms.py
|
MasaKat0/ATRO
|
05c89440d026cbff5bb39a5eae87cd8f103c62c2
|
[
"MIT"
] | 1
|
2021-04-05T18:37:37.000Z
|
2021-04-05T18:37:37.000Z
|
atro/atro_svm/algorithms.py
|
MasaKat0/ATRO
|
05c89440d026cbff5bb39a5eae87cd8f103c62c2
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.optimize import minimize
class SVM():
    """Linear soft-margin SVM solved as a constrained program with SLSQP.

    Minimizes sum(xi) + (C/2) * sqrt(mean(omega**2)) subject to
    y_i * (x_i . omega) >= 1 - xi_i and xi_i >= 0, where xi are slack
    variables and omega is the weight vector (stored as self.gamma).
    """

    def __init__(self, C):
        # C: regularization weight on the RMS norm of the weight vector.
        self.C = C

    def fit(self, x, y):
        """Fit the weight vector on data x of shape (N, dim), labels y in {-1, +1}.

        Stores slacks in self.xi and weights in self.gamma.
        """
        N, dim = x.shape
        # Decision vector beta = [xi (N slack terms), omega (dim weights)],
        # initialized at zero.
        beta0 = np.zeros(dim+N)
        fun = lambda beta: self.objective_function(beta, N)
        # Inequality constraints: margin constraints >= 0 and xi >= 0.
        cons = ({'type': 'ineq', 'fun': lambda beta: self.contraint_function(beta, x, y)},
                {'type': 'ineq', 'fun': lambda beta: beta[:N]})
        res = minimize(fun, beta0, method='SLSQP', constraints=cons)
        self.xi, self.gamma = res.x[:N], res.x[N:]

    def objective_function(self, beta, N):
        """Total hinge slack plus the RMS-norm regularizer."""
        xi, omega = beta[:N], beta[N:]
        return np.sum(xi) + (self.C/2)*np.sqrt(np.mean(omega**2))

    def contraint_function(self, beta, x, y):
        """Margin constraints y*(x.omega) - 1 + xi, >= 0 when satisfied.

        NOTE: the misspelled name ("contraint") is kept for API
        compatibility with existing callers.
        """
        N = len(x)
        xi, omega = beta[:N], beta[N:]
        g = np.dot(x, omega)
        g = y*g - 1 + xi
        return g

    def predict(self, x):
        """Return the sign of the decision function for each row of x."""
        return np.sign(np.dot(x, self.gamma))

    def error(self, x, y):
        """Misclassification rate of predict(x) against labels y."""
        return (len(x) - np.sum(y == self.predict(x)))/len(x)

    def model_selection(self, x_train, t_train, x_test, folds=5, num_basis=100):
        """Choose kernel width sigma and regularizer C by k-fold cross-validation.

        Relies on the module-level dist() helper for the pairwise distance
        matrices. Returns the Gaussian-kernel-expanded train/test design
        matrices (with a bias column appended) and the chosen regularizer.
        NOTE(review): self.C is left at the *last* value tried, not the
        chosen one -- the caller is expected to set it from lda_chosen.
        """
        x_train, x_test = x_train.T, x_test.T
        XC_dist, TC_dist, CC_dist, n, num_basis = dist(x_train, x_test, num_basis)
        # Assign each sample to a fold via a random permutation.
        cv_fold = np.arange(folds)  # plain range misbehaves with == below
        cv_split0 = np.floor(np.arange(n)*folds/n)
        cv_index = cv_split0[np.random.permutation(n)]
        # Candidate kernel widths and regularization strengths.
        sigma_list = np.array([0.01, 0.1, 1.])
        lda_list = np.array([0.01, 0.1, 1.])
        score_cv = np.zeros((len(sigma_list), len(lda_list)))
        for sigma_idx, sigma in enumerate(sigma_list):
            # Precompute the per-fold kernel blocks for this sigma.
            h_cv = []
            t_cv = []
            for k in cv_fold:
                h_cv.append(np.exp(-XC_dist[:, cv_index==k]/(2*sigma**2)))
                t_cv.append(t_train[cv_index==k])
            for k in range(folds):
                # Assemble training (all folds but k) and validation
                # (fold k) design matrices.
                count = 0
                for j in range(folds):
                    if j == k:
                        hte = h_cv[j].T
                        tte = t_cv[j]
                    else:
                        if count == 0:
                            htr = h_cv[j].T
                            ttr = t_cv[j]
                            count += 1
                        else:
                            htr = np.append(htr, h_cv[j].T, axis=0)
                            ttr = np.append(ttr, t_cv[j], axis=0)
                # Append a bias column to both matrices.
                one = np.ones((len(htr),1))
                htr = np.concatenate([htr, one], axis=1)
                one = np.ones((len(hte),1))
                hte = np.concatenate([hte, one], axis=1)
                for lda_idx, lda in enumerate(lda_list):
                    self.C = lda
                    self.fit(htr, ttr.T)
                    score = self.error(hte, tte)
                    score_cv[sigma_idx, lda_idx] = score_cv[sigma_idx, lda_idx] + score
        # Pick the (sigma, lambda) pair with the lowest accumulated error.
        (sigma_idx_chosen, lda_idx_chosen) = np.unravel_index(np.argmin(score_cv), score_cv.shape)
        sigma_chosen = sigma_list[sigma_idx_chosen]
        lda_chosen = lda_list[lda_idx_chosen]
        # Expand the full train/test sets with the chosen kernel width.
        x_train = np.exp(-XC_dist/(2*sigma_chosen**2)).T
        x_test = np.exp(-TC_dist/(2*sigma_chosen**2)).T
        one = np.ones((len(x_train),1))
        x_train = np.concatenate([x_train, one], axis=1)
        one = np.ones((len(x_test),1))
        x_test = np.concatenate([x_test, one], axis=1)
        return x_train, x_test, lda_chosen
class AT():
    """Adversarially-trained linear SVM variant solved with SLSQP.

    Same program as SVM, but each margin constraint is tightened by an
    L1 penalty epsilon * ||omega||_1, i.e.
    y_i * (x_i . omega) - epsilon * sum(|omega|) >= 1 - xi_i, xi_i >= 0.
    """

    def __init__(self, C, epsilon):
        # C: regularization weight; epsilon: adversarial perturbation radius.
        self.C = C
        self.epsilon = epsilon

    def fit(self, x, y):
        """Fit the weight vector on data x of shape (N, dim), labels y in {-1, +1}.

        Stores slacks in self.xi and weights in self.gamma.
        """
        N, dim = x.shape
        # Decision vector beta = [xi (N slack terms), omega (dim weights)].
        beta0 = np.zeros(dim+N)
        fun = lambda beta: self.objective_function(beta, N)
        # Inequality constraints: robust margin constraints >= 0 and xi >= 0.
        cons = ({'type': 'ineq', 'fun': lambda beta: self.contraint_function(beta, x, y)},
                {'type': 'ineq', 'fun': lambda beta: beta[:N]})
        res = minimize(fun, beta0, method='SLSQP', constraints=cons)
        self.xi, self.gamma = res.x[:N], res.x[N:]

    def objective_function(self, beta, N):
        """Total hinge slack plus the RMS-norm regularizer."""
        xi, omega = beta[:N], beta[N:]
        return np.sum(xi) + (self.C/2)*np.sqrt(np.mean(omega**2))

    def contraint_function(self, beta, x, y):
        """Robust margin constraints, >= 0 when satisfied.

        NOTE: the misspelled name ("contraint") is kept for API
        compatibility with existing callers.
        """
        N = len(x)
        xi, omega = beta[:N], beta[N:]
        g = np.dot(x, omega)
        # Margin reduced by the worst-case L-inf perturbation of radius epsilon.
        g = y*g - self.epsilon*np.sum(np.abs(omega)) - 1 + xi
        return g

    def predict(self, x):
        """Return the sign of the decision function for each row of x."""
        return np.sign(np.dot(x, self.gamma))

    def error(self, x, y):
        """Misclassification rate of predict(x) against labels y."""
        return (len(x) - np.sum(y == self.predict(x)))/len(x)

    def model_selection(self, x_train, t_train, x_test, folds=5, num_basis=100):
        """Choose kernel width sigma and regularizer C by k-fold cross-validation.

        Relies on the module-level dist() helper for the pairwise distance
        matrices. Returns the Gaussian-kernel-expanded train/test design
        matrices (with a bias column appended) and the chosen regularizer.
        NOTE(review): self.C is left at the *last* value tried, not the
        chosen one -- the caller is expected to set it from lda_chosen.
        """
        x_train, x_test = x_train.T, x_test.T
        XC_dist, TC_dist, CC_dist, n, num_basis = dist(x_train, x_test, num_basis)
        # Assign each sample to a fold via a random permutation.
        cv_fold = np.arange(folds)  # plain range misbehaves with == below
        cv_split0 = np.floor(np.arange(n)*folds/n)
        cv_index = cv_split0[np.random.permutation(n)]
        # Candidate kernel widths and regularization strengths.
        sigma_list = np.array([0.01, 0.1, 1.])
        lda_list = np.array([0.01, 0.1, 1.])
        score_cv = np.zeros((len(sigma_list), len(lda_list)))
        for sigma_idx, sigma in enumerate(sigma_list):
            # Precompute the per-fold kernel blocks for this sigma.
            h_cv = []
            t_cv = []
            for k in cv_fold:
                h_cv.append(np.exp(-XC_dist[:, cv_index==k]/(2*sigma**2)))
                t_cv.append(t_train[cv_index==k])
            for k in range(folds):
                # Assemble training (all folds but k) and validation
                # (fold k) design matrices.
                count = 0
                for j in range(folds):
                    if j == k:
                        hte = h_cv[j].T
                        tte = t_cv[j]
                    else:
                        if count == 0:
                            htr = h_cv[j].T
                            ttr = t_cv[j]
                            count += 1
                        else:
                            htr = np.append(htr, h_cv[j].T, axis=0)
                            ttr = np.append(ttr, t_cv[j], axis=0)
                # Append a bias column to both matrices.
                one = np.ones((len(htr),1))
                htr = np.concatenate([htr, one], axis=1)
                one = np.ones((len(hte),1))
                hte = np.concatenate([hte, one], axis=1)
                for lda_idx, lda in enumerate(lda_list):
                    self.C = lda
                    self.fit(htr, ttr.T)
                    score = self.error(hte, tte)
                    score_cv[sigma_idx, lda_idx] = score_cv[sigma_idx, lda_idx] + score
        # Pick the (sigma, lambda) pair with the lowest accumulated error.
        (sigma_idx_chosen, lda_idx_chosen) = np.unravel_index(np.argmin(score_cv), score_cv.shape)
        sigma_chosen = sigma_list[sigma_idx_chosen]
        lda_chosen = lda_list[lda_idx_chosen]
        # Expand the full train/test sets with the chosen kernel width.
        x_train = np.exp(-XC_dist/(2*sigma_chosen**2)).T
        x_test = np.exp(-TC_dist/(2*sigma_chosen**2)).T
        one = np.ones((len(x_train),1))
        x_train = np.concatenate([x_train, one], axis=1)
        one = np.ones((len(x_test),1))
        x_test = np.concatenate([x_test, one], axis=1)
        return x_train, x_test, lda_chosen
class MH():
    """Linear classifier with a reject option, trained by constrained optimization.

    The model learns two linear scorers over the same feature vector x:
      * gamma — the classification direction (see `predict`),
      * theta — the rejection direction (see `reject`).
    `fit` solves a slack-variable formulation with scipy's SLSQP solver:
    the flat parameter vector is laid out as
        params = [xi (N slack values), gamma (dim), theta (dim)]
    which matches the slicing `params[:N]`, `params[N:-dim]`, `params[-dim:]`
    used throughout. (The exact surrogate-loss formulation this implements
    is not stated here — presumably a max-hinge rejection loss; confirm
    against the accompanying paper/notes.)

    Hyper-parameters:
      C, D   — regularization weights on gamma and theta respectively
      cost   — price charged for rejecting a sample (also used in `error`)
      alpha, beta — scaling constants inside the two inequality constraints
    """
    def __init__(self, C, D, cost, alpha, beta):
        self.C = C
        self.D = D
        self.cost = cost
        self.alpha = alpha
        self.beta = beta
    def fit(self, x, y):
        """Solve the constrained problem for data x (N x dim) and labels y.

        Stores the solution as self.xi (slacks), self.gamma (classifier
        weights) and self.theta (rejector weights).
        """
        N, dim = x.shape
        # Start from all-zeros: N slacks followed by gamma and theta.
        params0 = np.zeros(2*dim+N)
        fun = lambda params: self.objective_function(params, x)
        # NOTE: the method names below carry a historical typo
        # ("contraint" instead of "constraint"); kept as-is so the
        # class interface does not change.
        cons = ({'type': 'ineq', 'fun': lambda params: self.contraint_function0(params, x, y)},
                {'type': 'ineq', 'fun': lambda params: self.contraint_function1(params, x, y)},
                # Slack variables must stay non-negative.
                {'type': 'ineq', 'fun': lambda params: params[:N]})
        res = minimize(fun, params0, method='SLSQP', constraints=cons)
        self.xi, self.gamma, self.theta = res.x[:N], res.x[N:-dim], res.x[-dim:]
    def objective_function(self, params, x):
        """Sum of slacks plus regularizers on gamma and theta.

        Note the regularizer is (C/2)*sqrt(mean(gamma**2)) — an RMS
        (root-mean-square) penalty, not the usual squared norm; this is
        what the code literally computes, whether or not a squared norm
        was intended.
        """
        N, dim = x.shape
        xi, gamma, theta = params[:N], params[N:-dim], params[-dim:]
        obj = np.sum(xi) + (self.C/2)*np.sqrt(np.mean(gamma**2)) + (self.D/2)*np.sqrt(np.mean(theta**2))
        return obj
    def contraint_function0(self, params, x, y):
        """Rejection constraint: xi_i - cost*(1 - beta*<x_i, theta>) >= 0."""
        N, dim = x.shape
        xi, gamma, theta = params[:N], params[N:-dim], params[-dim:]
        g = np.dot(x, theta)
        g = xi - self.cost*(1- self.beta*g)
        return g
    def contraint_function1(self, params, x, y):
        """Margin constraint: xi_i - 1 - (alpha/2)*y_i*<x_i, theta/y_i - gamma> >= 0."""
        N, dim = x.shape
        xi, gamma, theta = params[:N], params[N:-dim], params[-dim:]
        # Per-sample inner products with the (theta/y_i - gamma) direction.
        g = np.array([np.dot(x[i], (theta/y[i] - gamma)) for i in range(len(y))])
        g = xi - 1- (self.alpha/2)*y*g
        return g
    def predict(self, x):
        """Predicted label in {-1, 0, +1}: sign of the gamma score."""
        return np.sign(np.dot(x, self.gamma))
    def reject(self, x):
        """Rejection decision: sign of the theta score; -1 means reject."""
        return np.sign(np.dot(x, self.theta))
    def error(self, x, y, x_reject, show_rate=False):
        """0-1-c risk: misclassifications among accepted samples plus
        `cost` per rejected sample, averaged over all N samples.

        x_reject are the features fed to the rejector (callers pass the
        same matrix as x). If show_rate is True, also returns the
        fraction of rejected samples.
        """
        N = len(x)
        # Count errors only where the rejector accepted (reject(...) == 1);
        # assumes y is index-aligned with the rows of x and x_reject.
        wrong_answer = np.sum(y[self.reject(x_reject) == 1] != self.predict(x)[self.reject(x_reject) == 1])
        rejected = self.cost*np.sum(self.reject(x_reject) == -1)
        res1 = (wrong_answer + rejected)/N
        if show_rate:
            return res1, np.mean(self.reject(x_reject) == -1)
        else:
            return res1
    def model_selection(self, x_train, t_train, x_test, folds=5, num_basis=100, algorithm='Ridge', logit=False):
        """Grid-search (sigma, C, D) by k-fold CV over Gaussian-kernel features.

        Inputs arrive sample-major and are transposed to the d x n layout
        expected by `dist`. Returns the kernel-expanded (plus bias column)
        train/test design matrices built with the best sigma, together
        with the chosen C and D values. The `algorithm` and `logit`
        arguments are accepted but never used — presumably kept for
        signature compatibility with sibling model classes.

        Side effect: self.C and self.D are left at the last grid values
        tried, not the chosen ones; callers must use the returned
        lda0_chosen/lda1_chosen.
        """
        x_train, x_test = x_train.T, x_test.T
        t_train = t_train
        XC_dist, TC_dist, CC_dist, n, num_basis = dist(x_train, x_test, num_basis)
        # setup the cross validation
        cv_fold = np.arange(folds) # normal range behaves strange with == sign
        cv_split0 = np.floor(np.arange(n)*folds/n)
        # Random permutation assigns each sample to a fold.
        cv_index = cv_split0[np.random.permutation(n)]
        # set the sigma list and lambda list
        sigma_list = np.array([0.01, 0.1, 1])
        lda0_list = np.array([0.01, 0.1, 1])
        lda1_list = np.array([0.01, 0.1, 1])
        score_cv = np.zeros((len(sigma_list), len(lda0_list), len(lda1_list)))
        for sigma_idx, sigma in enumerate(sigma_list):
            # pre-sum to speed up calculation
            h_cv = []
            t_cv = []
            for k in cv_fold:
                # Gaussian kernel features for the samples of fold k.
                h_cv.append(np.exp(-XC_dist[:, cv_index==k]/(2*sigma**2)))
                t_cv.append(t_train[cv_index==k])
            for k in range(folds):
                #print(h0_cv[0])
                # calculate the h vectors for training and test
                count = 0
                for j in range(folds):
                    if j == k:
                        # Fold k is held out for validation.
                        hte = h_cv[j].T
                        tte = t_cv[j]
                    else:
                        # Stack the remaining folds into the training set.
                        if count == 0:
                            htr = h_cv[j].T
                            ttr = t_cv[j]
                            count += 1
                        else:
                            htr = np.append(htr, h_cv[j].T, axis=0)
                            ttr = np.append(ttr, t_cv[j], axis=0)
                # Append a bias column of ones to both design matrices.
                one = np.ones((len(htr),1))
                htr = np.concatenate([htr, one], axis=1)
                one = np.ones((len(hte),1))
                hte = np.concatenate([hte, one], axis=1)
                for lda0_idx, lda0 in enumerate(lda0_list):
                    for lda1_idx, lda1 in enumerate(lda1_list):
                        self.C = lda0
                        self.D = lda1
                        self.fit(htr, ttr.T)
                        score = self.error(hte, tte, hte)
                        score_cv[sigma_idx, lda0_idx, lda1_idx] = score_cv[sigma_idx, lda0_idx, lda1_idx] + score
        # Pick the hyper-parameter triple with the lowest accumulated CV risk.
        (sigma_idx_chosen, lda0_idx_chosen, lda1_idx_chosen) = np.unravel_index(np.argmin(score_cv), score_cv.shape)
        sigma_chosen = sigma_list[sigma_idx_chosen]
        lda0_chosen = lda0_list[lda0_idx_chosen]
        lda1_chosen = lda1_list[lda1_idx_chosen]
        # Rebuild full train/test kernel features with the chosen sigma.
        x_train = np.exp(-XC_dist/(2*sigma_chosen**2)).T
        x_test = np.exp(-TC_dist/(2*sigma_chosen**2)).T
        one = np.ones((len(x_train),1))
        x_train = np.concatenate([x_train, one], axis=1)
        one = np.ones((len(x_test),1))
        x_test = np.concatenate([x_test, one], axis=1)
        return x_train, x_test, lda0_chosen, lda1_chosen
class ATRO():
    """Variant of the MH classifier-with-reject whose constraints include an
    epsilon-scaled L1 term on the weight directions.

    Structurally identical to the sibling `MH` class (same parameter layout
    [xi (N), gamma (dim), theta (dim)], same SLSQP solve, same CV routine);
    the only difference is the extra `epsilon*sum(|.|)` terms inside the two
    inequality constraints. The name suggests "Adversarial Training with a
    Rejection Option" and the L1 terms look like an L-infinity robustness
    margin — TODO confirm against the source paper.

    Hyper-parameters: as in MH, plus `epsilon` — the perturbation radius
    multiplying the L1 penalties in the constraints.
    """
    def __init__(self, C, D, cost, alpha, beta, epsilon):
        self.C = C
        self.D = D
        self.cost = cost
        self.alpha = alpha
        self.beta = beta
        self.epsilon = epsilon
    def fit(self, x, y):
        """Solve the constrained problem for data x (N x dim) and labels y.

        Stores self.xi (slacks), self.gamma (classifier weights) and
        self.theta (rejector weights).
        """
        N, dim = x.shape
        # Start from all-zeros: N slacks followed by gamma and theta.
        params0 = np.zeros(2*dim+N)
        fun = lambda params: self.objective_function(params, x)
        # NOTE: "contraint" typo kept as-is to preserve the interface.
        cons = ({'type': 'ineq', 'fun': lambda params: self.contraint_function0(params, x, y)},
                {'type': 'ineq', 'fun': lambda params: self.contraint_function1(params, x, y)},
                # Slack variables must stay non-negative.
                {'type': 'ineq', 'fun': lambda params: params[:N]})
        res = minimize(fun, params0, method='SLSQP', constraints=cons)
        self.xi, self.gamma, self.theta = res.x[:N], res.x[N:-dim], res.x[-dim:]
    def objective_function(self, params, x):
        """Sum of slacks plus RMS regularizers on gamma and theta (the code
        computes sqrt(mean(.**2)), not a squared norm)."""
        N, dim = x.shape
        xi, gamma, theta = params[:N], params[N:-dim], params[-dim:]
        obj = np.sum(xi) + (self.C/2)*np.sqrt(np.mean(gamma**2)) + (self.D/2)*np.sqrt(np.mean(theta**2))
        return obj
    def contraint_function0(self, params, x, y):
        """Rejection constraint with a robustness margin:
        xi_i - cost*(1 - beta*(<x_i, theta> - epsilon*||theta||_1)) >= 0."""
        N, dim = x.shape
        xi, gamma, theta = params[:N], params[N:-dim], params[-dim:]
        g = np.dot(x, theta)
        # The epsilon*||theta||_1 term shrinks the score uniformly for
        # every sample before the hinge is applied.
        g = xi - self.cost*(1- self.beta*(g - self.epsilon*np.sum(np.abs(theta))))
        return g
    def contraint_function1(self, params, x, y):
        """Margin constraint with per-sample L1 robustness term on the
        combined direction zeta_i = theta/y_i - gamma."""
        N, dim = x.shape
        xi, gamma, theta = params[:N], params[N:-dim], params[-dim:]
        g = np.array([np.dot(x[i], (theta/y[i] - gamma)) for i in range(len(y))])
        # One zeta row per sample; its L1 norm scales the epsilon penalty.
        zeta = np.array([theta/y[i] - gamma for i in range(len(y))])
        g = xi - 1- (self.alpha/2)*(y*g + self.epsilon*np.sum(np.abs(zeta), axis=1))
        return g
    def predict(self, x):
        """Predicted label in {-1, 0, +1}: sign of the gamma score."""
        return np.sign(np.dot(x, self.gamma))
    def reject(self, x):
        """Rejection decision: sign of the theta score; -1 means reject."""
        return np.sign(np.dot(x, self.theta))
    def error(self, x, y, x_reject, show_rate=False):
        """0-1-c risk: misclassifications among accepted samples plus
        `cost` per rejection, averaged over all N samples. Optionally
        also returns the rejection rate."""
        N = len(x)
        # Assumes y is index-aligned with the rows of x and x_reject.
        wrong_answer = np.sum(y[self.reject(x_reject) == 1] != self.predict(x)[self.reject(x_reject) == 1])
        rejected = self.cost*np.sum(self.reject(x_reject) == -1)
        res1 = (wrong_answer + rejected)/N
        if show_rate:
            return res1, np.mean(self.reject(x_reject) == -1)
        else:
            return res1
    def model_selection(self, x_train, t_train, x_test, folds=5, num_basis=100, algorithm='Ridge', logit=False):
        """Grid-search (sigma, C, D) by k-fold CV over Gaussian-kernel
        features; identical to MH.model_selection. Returns the expanded
        train/test matrices (with bias column) for the best sigma plus the
        chosen C (lda0) and D (lda1). `algorithm` and `logit` are unused.

        Side effect: self.C/self.D end up at the last grid values tried.
        """
        x_train, x_test = x_train.T, x_test.T
        t_train = t_train
        XC_dist, TC_dist, CC_dist, n, num_basis = dist(x_train, x_test, num_basis)
        # setup the cross validation
        cv_fold = np.arange(folds) # normal range behaves strange with == sign
        cv_split0 = np.floor(np.arange(n)*folds/n)
        cv_index = cv_split0[np.random.permutation(n)]
        # set the sigma list and lambda list
        sigma_list = np.array([0.01, 0.1, 1])
        lda0_list = np.array([0.01, 0.1, 1])
        lda1_list = np.array([0.01, 0.1, 1])
        score_cv = np.zeros((len(sigma_list), len(lda0_list), len(lda1_list)))
        for sigma_idx, sigma in enumerate(sigma_list):
            # pre-sum to speed up calculation
            h_cv = []
            t_cv = []
            for k in cv_fold:
                # Gaussian kernel features for the samples of fold k.
                h_cv.append(np.exp(-XC_dist[:, cv_index==k]/(2*sigma**2)))
                t_cv.append(t_train[cv_index==k])
            for k in range(folds):
                #print(h0_cv[0])
                # calculate the h vectors for training and test
                count = 0
                for j in range(folds):
                    if j == k:
                        # Fold k is held out for validation.
                        hte = h_cv[j].T
                        tte = t_cv[j]
                    else:
                        # Stack the remaining folds into the training set.
                        if count == 0:
                            htr = h_cv[j].T
                            ttr = t_cv[j]
                            count += 1
                        else:
                            htr = np.append(htr, h_cv[j].T, axis=0)
                            ttr = np.append(ttr, t_cv[j], axis=0)
                # Append a bias column of ones to both design matrices.
                one = np.ones((len(htr),1))
                htr = np.concatenate([htr, one], axis=1)
                one = np.ones((len(hte),1))
                hte = np.concatenate([hte, one], axis=1)
                for lda0_idx, lda0 in enumerate(lda0_list):
                    for lda1_idx, lda1 in enumerate(lda1_list):
                        self.C = lda0
                        self.D = lda1
                        self.fit(htr, ttr.T)
                        score = self.error(hte, tte, hte)
                        score_cv[sigma_idx, lda0_idx, lda1_idx] = score_cv[sigma_idx, lda0_idx, lda1_idx] + score
        # Pick the hyper-parameter triple with the lowest accumulated CV risk.
        (sigma_idx_chosen, lda0_idx_chosen, lda1_idx_chosen) = np.unravel_index(np.argmin(score_cv), score_cv.shape)
        sigma_chosen = sigma_list[sigma_idx_chosen]
        lda0_chosen = lda0_list[lda0_idx_chosen]
        lda1_chosen = lda1_list[lda1_idx_chosen]
        # Rebuild full train/test kernel features with the chosen sigma.
        x_train = np.exp(-XC_dist/(2*sigma_chosen**2)).T
        x_test = np.exp(-TC_dist/(2*sigma_chosen**2)).T
        one = np.ones((len(x_train),1))
        x_train = np.concatenate([x_train, one], axis=1)
        one = np.ones((len(x_test),1))
        x_test = np.concatenate([x_test, one], axis=1)
        return x_train, x_test, lda0_chosen, lda1_chosen
def dist(x, T=None, num_basis=False):
    """Sample random basis centers from x and compute squared distances.

    :param x: d x n training matrix (one sample per column).
    :param T: d x m test matrix. Despite the None default it is always
        passed to CalcDistanceSquared, so callers must supply it.
    :param num_basis: requested number of basis centers; the sentinel
        False selects the historical default of 100000. The effective
        count is capped at n.
    :return: (XC_dist, TC_dist, CC_dist, n, num_basis) — squared-distance
        matrices of x, T and the centers C against C, plus the sample
        count and the *actual* number of centers used.
    """
    (d, n) = x.shape
    # check input argument
    if num_basis is False:
        num_basis = 100000
    # Bug fix: np.random.permutation(n)[0:num_basis] can only yield n
    # indices, so when n < num_basis the old code built C with n columns
    # yet still reported the larger, wrong num_basis. Clamp so the
    # returned count always matches C's actual column count.
    num_basis = min(n, num_basis)
    idx = np.random.permutation(n)[0:num_basis]
    C = x[:, idx]
    # calculate the squared distances
    XC_dist = CalcDistanceSquared(x, C)
    TC_dist = CalcDistanceSquared(T, C)
    CC_dist = CalcDistanceSquared(C, C)
    return XC_dist, TC_dist, CC_dist, n, num_basis
def CalcDistanceSquared(X, C):
    """Pairwise squared Euclidean distances between column vectors.

    Entry [i, j] of the result equals ||X[:, j] - C[:, i]||^2, obtained
    from the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b so the
    whole computation is three vectorized operations.

    :param X: d x n matrix, one vector per column
    :param C: d x nc matrix, one vector per column
    :return: nc x n matrix of squared distances
    """
    x_sq_norms = (X ** 2).sum(axis=0)
    c_sq_norms = (C ** 2).sum(axis=0)
    cross_terms = np.dot(C.T, X)
    return x_sq_norms[np.newaxis, :] + c_sq_norms[:, np.newaxis] - 2 * cross_terms
| 37.393762
| 116
| 0.507116
| 2,725
| 19,183
| 3.412477
| 0.064587
| 0.023228
| 0.017206
| 0.020647
| 0.93139
| 0.92924
| 0.92924
| 0.926981
| 0.916873
| 0.913969
| 0
| 0.023448
| 0.357504
| 19,183
| 513
| 117
| 37.393762
| 0.731034
| 0.056508
| 0
| 0.912088
| 0
| 0
| 0.007761
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093407
| false
| 0
| 0.005495
| 0.021978
| 0.186813
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b4f00d6c4f0471b062edd68766cff227b19b36fd
| 15,312
|
py
|
Python
|
pylearn2/sandbox/cuda_convnet/tests/test_filter_acts.py
|
Menerve/pylearn2
|
ad7bcfda3294404aebd71f5a5c4a8623d401a98e
|
[
"BSD-3-Clause"
] | 3
|
2016-01-23T10:18:39.000Z
|
2019-02-28T06:22:45.000Z
|
pylearn2/sandbox/cuda_convnet/tests/test_filter_acts.py
|
Menerve/pylearn2
|
ad7bcfda3294404aebd71f5a5c4a8623d401a98e
|
[
"BSD-3-Clause"
] | null | null | null |
pylearn2/sandbox/cuda_convnet/tests/test_filter_acts.py
|
Menerve/pylearn2
|
ad7bcfda3294404aebd71f5a5c4a8623d401a98e
|
[
"BSD-3-Clause"
] | null | null | null |
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow", "David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
from theano import shared
from theano.tensor import grad, constant
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from theano.sandbox.cuda import gpu_from_host
from theano.sandbox.cuda import host_from_gpu
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.tensor.nnet.conv import conv2d
from theano import function
from theano import tensor as T
import warnings
def test_match_valid_conv():
    """FilterActs with no padding must match theano's conv2d in valid mode.

    Builds identical random images/filters in cuda-convnet's c01b layout,
    runs both implementations, and fails with a diagnostic dump if the
    max absolute difference exceeds 2.4e-6. (Python 2 module — note the
    print statements.)
    """
    # Tests that running FilterActs with no padding is the same as running
    # theano's conv2D in valid mode
    rng = np.random.RandomState([2012,10,9])
    batch_size = 5
    rows = 10
    cols = 9
    channels = 3
    filter_rows = 4
    filter_cols = filter_rows
    num_filters = 16
    # cuda-convnet layout: (channels, rows, cols, batch) a.k.a. c01b.
    images = shared(rng.uniform(-1., 1., (channels, rows, cols,
        batch_size)).astype('float32'), name='images')
    filters = shared(rng.uniform(-1., 1., (channels, filter_rows,
        filter_cols, num_filters)).astype('float32'), name='filters')
    gpu_images = gpu_from_host(images)
    gpu_filters = gpu_from_host(filters)
    output = FilterActs()(gpu_images, gpu_filters)
    output = host_from_gpu(output)
    # Reshuffle to theano's bc01 layout for the reference conv2d.
    images_bc01 = images.dimshuffle(3,0,1,2)
    filters_bc01 = filters.dimshuffle(3,0,1,2)
    # Pre-flip the filters: conv2d convolves (flips kernels), while
    # FilterActs presumably computes correlation — confirm against the
    # FilterActs docs.
    filters_bc01 = filters_bc01[:,:,::-1,::-1]
    output_conv2d = conv2d(images_bc01, filters_bc01,
            border_mode='valid')
    # Back to c01b so both outputs are directly comparable.
    output_conv2d = output_conv2d.dimshuffle(1,2,3,0)
    f = function([], [output, output_conv2d])
    output, output_conv2d = f()
    warnings.warn("""test_match_valid_conv success criterion is not very strict. Can we verify that this is OK?
    One possibility is that theano is numerically unstable and Alex's code is better.
    Probably theano CPU 64 bit is OK but it's worth checking the others.""")
    if np.abs(output - output_conv2d).max() > 2.4e-6:
        assert type(output) == type(output_conv2d)
        assert output.dtype == output_conv2d.dtype
        if output.shape != output_conv2d.shape:
            print 'cuda-convnet shape: ',output.shape
            print 'theano shape: ',output_conv2d.shape
            assert False
        # Shapes match but values diverge: dump the error statistics.
        err = np.abs(output - output_conv2d)
        print 'absolute error range: ', (err.min(), err.max())
        print 'mean absolute error: ', err.mean()
        print 'cuda-convnet value range: ', (output.min(), output.max())
        print 'theano value range: ', (output_conv2d.min(), output_conv2d.max())
        assert False
def test_match_valid_conv_strided():
    """FilterActs with stride must match theano's strided valid conv2d.

    Same comparison as test_match_valid_conv but with stride=3 passed to
    FilterActs and subsample=(3, 3) to conv2d. (Python 2 module.)
    """
    # Tests that running FilterActs with stride is the same as running
    # theano's conv2D in valid mode and then downsampling
    rng = np.random.RandomState([2012,10,9])
    batch_size = 5
    rows = 9
    cols = 9
    channels = 3
    filter_rows = 3
    filter_cols = filter_rows
    stride = 3
    num_filters = 16
    # cuda-convnet layout: (channels, rows, cols, batch) a.k.a. c01b.
    images = shared(rng.uniform(-1., 1., (channels, rows, cols,
        batch_size)).astype('float32'), name='images')
    filters = shared(rng.uniform(-1., 1., (channels, filter_rows,
        filter_cols, num_filters)).astype('float32'), name='filters')
    gpu_images = gpu_from_host(images)
    gpu_filters = gpu_from_host(filters)
    output = FilterActs(stride=stride)(gpu_images, gpu_filters)
    output = host_from_gpu(output)
    # Reshuffle to theano's bc01 layout; pre-flip filters so conv2d's
    # kernel flip cancels out (see test_match_valid_conv).
    images_bc01 = images.dimshuffle(3,0,1,2)
    filters_bc01 = filters.dimshuffle(3,0,1,2)
    filters_bc01 = filters_bc01[:,:,::-1,::-1]
    output_conv2d = conv2d(images_bc01, filters_bc01,
            border_mode='valid', subsample=(stride, stride))
    output_conv2d_orig = output_conv2d.dimshuffle(1,2,3,0)
    # conv2d already subsamples, so no manual striding is needed here;
    # the commented slice documents the equivalent post-hoc downsampling.
    output_conv2d = output_conv2d_orig # [:, ::stride, ::stride, :]
    f = function([], [output, output_conv2d, output_conv2d_orig])
    output, output_conv2d, output_conv2d_orig = f()
    warnings.warn("""test_match_valid_conv success criterion is not very strict. Can we verify that this is OK?
    One possibility is that theano is numerically unstable and Alex's code is better.
    Probably theano CPU 64 bit is OK but it's worth checking the others.""")
    if np.abs(output - output_conv2d).max() > 2.4e-6:
        assert type(output) == type(output_conv2d)
        assert output.dtype == output_conv2d.dtype
        if output.shape != output_conv2d.shape:
            print 'cuda-convnet shape: ',output.shape
            print 'theano shape: ',output_conv2d.shape
            assert False
        # Shapes match but values diverge: dump the error statistics.
        err = np.abs(output - output_conv2d)
        print 'absolute error range: ', (err.min(), err.max())
        print 'mean absolute error: ', err.mean()
        print 'cuda-convnet value range: ', (output.min(), output.max())
        print 'theano value range: ', (output_conv2d.min(), output_conv2d.max())
        assert False
def test_match_valid_conv_padded():
    """FilterActs with pad=3 must match valid conv2d on zero-padded input.

    The reference path zero-pads the images manually with T.alloc /
    T.set_subtensor, so a valid-mode conv2d over the padded tensor is
    equivalent to FilterActs' own padding. (Python 2 module.)
    """
    # Tests that running FilterActs with no padding is the same as running
    # theano's conv2D in valid mode
    rng = np.random.RandomState([2012,10,9])
    batch_size = 5
    rows = 10
    cols = 9
    channels = 3
    filter_rows = 4
    filter_cols = filter_rows
    num_filters = 16
    # cuda-convnet layout: (channels, rows, cols, batch) a.k.a. c01b.
    images = shared(rng.uniform(-1., 1., (channels, rows, cols,
        batch_size)).astype('float32'), name='images')
    filters = shared(rng.uniform(-1., 1., (channels, filter_rows,
        filter_cols, num_filters)).astype('float32'), name='filters')
    gpu_images = gpu_from_host(images)
    gpu_filters = gpu_from_host(filters)
    PAD = 3
    output = FilterActs(PAD)(gpu_images, gpu_filters)
    output = host_from_gpu(output)
    # Manually zero-pad a bc01 tensor and paste the images into its center.
    images_bc01 = T.alloc(0., batch_size, channels, rows + PAD * 2, cols + PAD * 2)
    images_bc01 = T.set_subtensor(images_bc01[:,:,PAD:-PAD,PAD:-PAD], images.dimshuffle(3,0,1,2))
    filters_bc01 = filters.dimshuffle(3,0,1,2)
    # Pre-flip filters so conv2d's kernel flip cancels out.
    filters_bc01 = filters_bc01[:,:,::-1,::-1]
    output_conv2d = conv2d(images_bc01, filters_bc01,
            border_mode='valid')
    # Back to c01b so both outputs are directly comparable.
    output_conv2d = output_conv2d.dimshuffle(1,2,3,0)
    f = function([], [output, output_conv2d])
    output, output_conv2d = f()
    warnings.warn("""test_match_valid_conv success criterion is not very strict. Can we verify that this is OK?
    One possibility is that theano is numerically unstable and Alex's code is better.
    Probably theano CPU 64 bit is OK but it's worth checking the others.""")
    # Unlike the unpadded test, the shape check here is unconditional.
    assert output.shape == output_conv2d.shape
    if np.abs(output - output_conv2d).max() > 2.4e-6:
        assert type(output) == type(output_conv2d)
        assert output.dtype == output_conv2d.dtype
        if output.shape != output_conv2d.shape:
            print 'cuda-convnet shape: ',output.shape
            print 'theano shape: ',output_conv2d.shape
            assert False
        # Shapes match but values diverge: dump the error statistics.
        err = np.abs(output - output_conv2d)
        print 'absolute error range: ', (err.min(), err.max())
        print 'mean absolute error: ', err.mean()
        print 'cuda-convnet value range: ', (output.min(), output.max())
        print 'theano value range: ', (output_conv2d.min(), output_conv2d.max())
        assert False
def test_grad():
    """Gradients through FilterActs must match gradients through conv2d.

    Projects both outputs onto the same fixed random cost weights and
    compares d(cost)/d(images) and d(cost)/d(filters) between the two
    implementations, with a 1.15e-5 tolerance. (Python 2 module.)
    """
    rng = np.random.RandomState([2012, 10, 9])
    batch_size = 5
    rows = 10
    cols = 9
    channels = 3
    filter_rows = 4
    filter_cols = filter_rows
    num_filters = 16
    # cuda-convnet layout: (channels, rows, cols, batch) a.k.a. c01b.
    images = shared(rng.uniform(-1., 1., (channels, rows, cols,
        batch_size)).astype('float32'), name='images')
    filters = shared(rng.uniform(-1., 1., (channels, filter_rows,
        filter_cols, num_filters)).astype('float32'), name='filters')
    gpu_images = gpu_from_host(images)
    gpu_filters = gpu_from_host(filters)
    output = FilterActs()(gpu_images, gpu_filters)
    output = host_from_gpu(output)
    # Proper random projection, like verify_grad does.
    # Output shape of a valid conv in c01b layout.
    cost_weights = rng.normal(size=(num_filters, rows - filter_rows + 1,
                              cols - filter_cols + 1, batch_size))
    cost = (constant(cost_weights) * output).sum()
    # Reference path: bc01 conv2d with pre-flipped filters
    # (see test_match_valid_conv).
    images_bc01 = images.dimshuffle(3,0,1,2)
    filters_bc01 = filters.dimshuffle(3,0,1,2)
    filters_bc01 = filters_bc01[:,:,::-1,::-1]
    output_conv2d = conv2d(images_bc01, filters_bc01,
            border_mode='valid')
    output_conv2d = output_conv2d.dimshuffle(1,2,3,0)
    # XXX: use verify_grad
    images_grad, filters_grad = grad(cost.sum(), [images, filters])
    reference_cost = (constant(cost_weights) * output_conv2d).sum()
    images_conv2d_grad, filters_conv2d_grad = grad(reference_cost,
            [images, filters])
    f = function([], [images_grad, filters_grad,
                      images_conv2d_grad,
                      filters_conv2d_grad])
    images_grad, filters_grad, images_conv2d_grad, filters_conv2d_grad = f()
    warnings.warn("""test_match_valid_conv success criterion is not very strict. Can we verify that this is OK?
    One possibility is that theano is numerically unstable and Alex's code is better.
    Probably theano CPU 64 bit is OK but it's worth checking the others.""")
    # XXX: Refactor
    if np.abs(images_grad - images_conv2d_grad).max() > 1.15e-5:
        print "=== IMAGES GRADIENT ==="
        assert type(images_grad) == type(images_conv2d_grad)
        assert images_grad.dtype == images_conv2d_grad.dtype
        if images_grad.shape != images_conv2d_grad.shape:
            print 'cuda-convnet shape: ',images_grad.shape
            print 'theano shape: ',images_conv2d_grad.shape
            assert False
        # Shapes match but values diverge: dump the error statistics.
        err = np.abs(images_grad - images_conv2d_grad)
        print 'absolute error range: ', (err.min(), err.max())
        print 'mean absolute error: ', err.mean()
        print 'cuda-convnet value range: ', (images_grad.min(),
                images_grad.max())
        print 'theano value range: ', (images_conv2d_grad.min(),
                images_conv2d_grad.max())
        assert False
    if np.abs(filters_grad - filters_conv2d_grad).max() > 1.15e-5:
        print "=== FILTERS GRADIENT ==="
        assert type(filters_grad) == type(filters_conv2d_grad)
        assert filters_grad.dtype == filters_conv2d_grad.dtype
        if filters_grad.shape != filters_conv2d_grad.shape:
            print 'cuda-convnet shape: ',filters_grad.shape
            print 'theano shape: ',filters_conv2d_grad.shape
            assert False
        # Shapes match but values diverge: dump the error statistics.
        err = np.abs(filters_grad - filters_conv2d_grad)
        print 'absolute error range: ', (err.min(), err.max())
        print 'mean absolute error: ', err.mean()
        print 'cuda-convnet value range: ', (filters_grad.min(),
                filters_grad.max())
        print 'theano value range: ', (filters_conv2d_grad.min(),
                filters_conv2d_grad.max())
        assert False
def test_grad_strided():
    """Gradients through strided FilterActs must match strided conv2d.

    Like test_grad but with stride=3; first checks that the two
    implementations even agree on the output shape for this input size
    (their striding conventions can differ), then compares image and
    filter gradients. (Python 2 module.)
    """
    rng = np.random.RandomState([2012, 10, 9])
    batch_size = 5
    rows = 9
    cols = 9
    channels = 3
    filter_rows = 3
    filter_cols = filter_rows
    num_filters = 16
    stride = 3
    # cuda-convnet layout: (channels, rows, cols, batch) a.k.a. c01b.
    images = shared(rng.uniform(-1., 1., (channels, rows, cols,
        batch_size)).astype('float32'), name='images')
    filters = shared(rng.uniform(-1., 1., (channels, filter_rows,
        filter_cols, num_filters)).astype('float32'), name='filters')
    gpu_images = gpu_from_host(images)
    gpu_filters = gpu_from_host(filters)
    output = FilterActs(stride=stride)(gpu_images, gpu_filters)
    output = host_from_gpu(output)
    # Reference path: bc01 conv2d with pre-flipped filters and matching
    # subsampling (see test_match_valid_conv).
    images_bc01 = images.dimshuffle(3,0,1,2)
    filters_bc01 = filters.dimshuffle(3,0,1,2)
    filters_bc01 = filters_bc01[:,:,::-1,::-1]
    output_conv2d = conv2d(images_bc01, filters_bc01,
            border_mode='valid', subsample=(stride, stride))
    output_conv2d = output_conv2d.dimshuffle(1,2,3,0)
    # Bail out early if the striding conventions disagree for this size;
    # comparing gradients would be meaningless.
    checker = function([], [output, output_conv2d])
    output_numpy, output_conv2d_numpy = checker()
    if output_numpy.shape != output_conv2d_numpy.shape:
        raise AssertionError("theano and cuda convnet follow different conventions for this input size, so we can't test cuda convnet by matching it against theano for these inputs")
    # Proper random projection, like verify_grad does.
    # Symbolic weights here (vs. numpy in test_grad) because the output
    # shape is only known symbolically.
    theano_rng = MRG_RandomStreams(2013*5*4)
    cost_weights = theano_rng.normal(size=output_conv2d.shape, dtype=output_conv2d.dtype)
    cost = (cost_weights * output).sum()
    # XXX: use verify_grad
    images_grad, filters_grad = grad(cost, [images, filters])
    reference_cost = (cost_weights * output_conv2d).sum()
    images_conv2d_grad, filters_conv2d_grad = grad(reference_cost, [images, filters])
    f = function([], [images_grad, filters_grad, images_conv2d_grad,
        filters_conv2d_grad])
    images_grad, filters_grad, images_conv2d_grad, filters_conv2d_grad = f()
    warnings.warn("""test_match_valid_conv success criterion is not very strict. Can we verify that this is OK?
    One possibility is that theano is numerically unstable and Alex's code is better.
    Probably theano CPU 64 bit is OK but it's worth checking the others.""")
    # XXX: Refactor
    if np.abs(images_grad - images_conv2d_grad).max() > 1.15e-5:
        print "=== IMAGES GRADIENT ==="
        assert type(images_grad) == type(images_conv2d_grad)
        assert images_grad.dtype == images_conv2d_grad.dtype
        if images_grad.shape != images_conv2d_grad.shape:
            print 'cuda-convnet shape: ',images_grad.shape
            print 'theano shape: ',images_conv2d_grad.shape
            assert False
        # Shapes match but values diverge: dump the error statistics.
        err = np.abs(images_grad - images_conv2d_grad)
        print 'absolute error range: ', (err.min(), err.max())
        print 'mean absolute error: ', err.mean()
        print 'cuda-convnet value range: ', (images_grad.min(),
                images_grad.max())
        print 'theano value range: ', (images_conv2d_grad.min(),
                images_conv2d_grad.max())
        assert False
    # NOTE(review): the filters tolerance here is 1e-5, slightly tighter
    # than the 1.15e-5 used elsewhere — confirm whether intentional.
    if np.abs(filters_grad - filters_conv2d_grad).max() > 1e-5:
        print "=== FILTERS GRADIENT ==="
        assert type(filters_grad) == type(filters_conv2d_grad)
        assert filters_grad.dtype == filters_conv2d_grad.dtype
        if filters_grad.shape != filters_conv2d_grad.shape:
            print 'cuda-convnet shape: ',filters_grad.shape
            print 'theano shape: ',filters_conv2d_grad.shape
            assert False
        # Shapes match but values diverge: dump the error statistics.
        err = np.abs(filters_grad - filters_conv2d_grad)
        print 'absolute error range: ', (err.min(), err.max())
        print 'mean absolute error: ', err.mean()
        print 'cuda-convnet value range: ', (filters_grad.min(),
                filters_grad.max())
        print 'theano value range: ', (filters_conv2d_grad.min(),
                filters_conv2d_grad.max())
        assert False
# Script entry point: runs only the padded-conv test when executed directly
# (presumably the test that was under active debugging — the full suite is
# meant to run under a test runner; confirm this narrowing is intentional).
if __name__ == '__main__':
    test_match_valid_conv_padded()
| 40.08377
| 182
| 0.646878
| 2,003
| 15,312
| 4.729905
| 0.092361
| 0.072198
| 0.037154
| 0.017944
| 0.876504
| 0.848638
| 0.838716
| 0.825839
| 0.825839
| 0.823095
| 0
| 0.035776
| 0.242424
| 15,312
| 381
| 183
| 40.188976
| 0.780948
| 0.033242
| 0
| 0.803448
| 0
| 0.003448
| 0.190763
| 0.008723
| 0
| 0
| 0
| 0
| 0.103448
| 0
| null | null | 0
| 0.041379
| null | null | 0.158621
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
370215ffc3ea55a38c0ce09625f94f7b42f43bf4
| 916
|
py
|
Python
|
Notebooks/load_classes.py
|
r0han99/ODIR
|
1f68cda76e80ceebd465ffbc3dedad6f9e16f930
|
[
"MIT"
] | null | null | null |
Notebooks/load_classes.py
|
r0han99/ODIR
|
1f68cda76e80ceebd465ffbc3dedad6f9e16f930
|
[
"MIT"
] | null | null | null |
Notebooks/load_classes.py
|
r0han99/ODIR
|
1f68cda76e80ceebd465ffbc3dedad6f9e16f930
|
[
"MIT"
] | null | null | null |
import re
def fetch_classdict(mode='all'):
    """Build a mapping from single-letter class token to class name.

    Reads 'class_names.txt' when mode == 'all', otherwise 'coi.txt'
    (classes of interest). Each line is expected to look like
    "Name ... (X)" where X is one uppercase letter; the result maps
    {'X': 'Name', ...} using the first whitespace-separated word of
    the line as the name.

    :param mode: 'all' for the full class list, anything else for the
        classes-of-interest file. (Interface unchanged.)
    :return: dict mapping token letter -> class name
    :raises FileNotFoundError: if the chosen file is absent.
    """
    # The original duplicated the entire body in both branches; only the
    # filename differed, so select it up front instead.
    filename = 'class_names.txt' if mode == 'all' else 'coi.txt'
    with open(filename, 'r') as f:
        content = f.readlines()
    combined = ' '.join(content)
    # fetching chars: a single uppercase letter wrapped in parentheses,
    # e.g. "(N)" — one token expected per line.
    pattern = r'\(([A-Z])\)'
    finds = re.findall(pattern, combined)
    # packaging: pair each line with its token, in order.
    classes = {}
    for item, token in zip(content, finds):
        item = item.split(' ')[0]
        classes[token] = item
    return classes
| 22.9
| 47
| 0.49345
| 100
| 916
| 4.5
| 0.41
| 0.031111
| 0.026667
| 0.031111
| 0.835556
| 0.835556
| 0.835556
| 0.835556
| 0.835556
| 0.835556
| 0
| 0.006826
| 0.360262
| 916
| 39
| 48
| 23.487179
| 0.761092
| 0.053493
| 0
| 0.75
| 0
| 0
| 0.072177
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.041667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2ed7184d4e5a31eb2b2f597338898671ef4a070a
| 5,656
|
py
|
Python
|
test_unwrap.py
|
FelixTheC/python_morsels
|
e5efa7e88d03a85ebfa34059328f6b446318c1f2
|
[
"MIT"
] | null | null | null |
test_unwrap.py
|
FelixTheC/python_morsels
|
e5efa7e88d03a85ebfa34059328f6b446318c1f2
|
[
"MIT"
] | null | null | null |
test_unwrap.py
|
FelixTheC/python_morsels
|
e5efa7e88d03a85ebfa34059328f6b446318c1f2
|
[
"MIT"
] | null | null | null |
from textwrap import dedent
import unittest
from unwrap import unwrap_lines
class UnwrapLinesTests(unittest.TestCase):
    """Tests for unwrap_lines"""

    # Large diff limit so assertEqual prints full multi-paragraph diffs.
    maxDiff = 5000

    def test_already_wrapped(self):
        # A single-line input must come back unchanged (modulo outer whitespace).
        text = "This text is already all on one line"
        self.assertEqual(unwrap_lines(text).strip(), text)

    def test_single_paragraph(self):
        # Hard-wrapped lines of one paragraph join into a single line.
        text = dedent("""
            Whether I'm teaching new Pythonistas or long-time Python
            programmers, I frequently find that **Python programmers
            underutilize multiple assignment**.
        """).strip()
        unwrapped_text = unwrap_lines(text)
        self.assertEqual(
            unwrapped_text.strip(),
            "Whether I'm teaching new Pythonistas or long-time Python " +
            "programmers, I frequently find that **Python programmers " +
            "underutilize multiple assignment**."
        )

    def test_multiple_paragraphs(self):
        # Paragraphs stay separated by blank lines ("\n\n") while the
        # lines inside each paragraph are joined.
        # NOTE(review): this copy of the file appears to have lost the
        # blank lines between paragraphs in the fixture below — confirm
        # against the upstream source before relying on it.
        text = dedent("""
            Whether I'm teaching new Pythonistas or long-time Python
            programmers, I frequently find that **Python programmers
            underutilize multiple assignment**.
            Multiple assignment (also known as tuple unpacking or iterable
            unpacking) allows you to assign multiple variables at the same
            time in one line of code. This feature often seems simple after
            you've learned about it, but **it can be tricky to recall
            multiple assignment when you need it most**.
            In this article we'll see what multiple assignment is, we'll take
            a look at common uses of multiple assignment, and then we'll look
            at a few uses for multiple assignment that are often overlooked.
            Note that in this article I will be using [f-strings][] which are
            a Python 3.6+ feature. If you're on an older version of Python,
            you'll need to mentally translate those to use the string
            `format` method.
        """).strip()
        unwrapped_text = unwrap_lines(text)
        self.assertEqual(
            unwrapped_text.strip(),
            "Whether I'm teaching new Pythonistas or long-time Python "
            "programmers, I frequently find that **Python programmers "
            "underutilize multiple assignment**."
            "\n\n"
            "Multiple assignment (also known as tuple unpacking or iterable "
            "unpacking) allows you to assign multiple variables at the same "
            "time in one line of code. This feature often seems simple after "
            "you've learned about it, but **it can be tricky to recall "
            "multiple assignment when you need it most**."
            "\n\n"
            "In this article we'll see what multiple assignment is, we'll "
            "take a look at common uses of multiple assignment, and then "
            "we'll look at a few uses for multiple assignment that are "
            "often overlooked."
            "\n\n"
            "Note that in this article I will be using [f-strings][] which "
            "are a Python 3.6+ feature. If you're on an older version of "
            "Python, you'll need to mentally translate those to use the "
            "string `format` method."
        )

    # To test the Bonus part of this exercise, comment out the following line
    # @unittest.expectedFailure
    def test_extra_line_breaks_preserved(self):
        # Runs of blank lines between paragraphs must survive unwrapping.
        # NOTE(review): the blank lines the test name refers to appear to
        # have been lost from this copy's fixtures — confirm upstream.
        text = dedent("""
            This is a line
            that is followed by another line
            There are 2 blank lines before this
            And there was 1 before this
            And three before this one
        """).lstrip()
        expected = dedent("""
            This is a line that is followed by another line
            There are 2 blank lines before this And there was 1 before this
            And three before this one
        """).lstrip()
        unwrapped_text = unwrap_lines(text)
        self.assertEqual(unwrapped_text, expected)

    # To test the Bonus part of this exercise, comment out the following line
    # @unittest.expectedFailure
    def test_preserve_line_break_if_line_ends_in_spaces(self):
        # A line ending in two trailing spaces keeps its hard break
        # (Markdown convention).
        # NOTE(review): the significant trailing spaces are invisible /
        # possibly stripped in this copy — confirm upstream.
        text = dedent("""
            This is a line ends in two spaces
            So this line doesn't wrap into it
            This line doesn't end in spaces
            So this line does wrap into it
            This line ends in 1 space
            So this line does wrap
        """).lstrip()
        expected = dedent("""
            This is a line ends in two spaces
            So this line doesn't wrap into it
            This line doesn't end in spaces So this line does wrap into it
            This line ends in 1 space So this line does wrap
        """).lstrip()
        unwrapped_text = unwrap_lines(text)
        self.assertEqual(unwrapped_text, expected)

    # To test the Bonus part of this exercise, comment out the following line
    # @unittest.expectedFailure
    def test_code_blocks_and_bullet_points(self):
        # Bullet and numbered list items must not be merged together:
        # input and expected output are identical here.
        text = dedent("""
            This text has bullet points:
            - Point 1
            - Point 2
            There are also numbered points:
            1. First item
            2. Second item
        """).lstrip()
        expected = dedent("""
            This text has bullet points:
            - Point 1
            - Point 2
            There are also numbered points:
            1. First item
            2. Second item
        """).lstrip()
        unwrapped_text = unwrap_lines(text)
        self.assertEqual(unwrapped_text, expected)
# Allow running this test module directly; verbosity=2 prints each test name.
if __name__ == "__main__":
    unittest.main(verbosity=2)
| 36.727273
| 78
| 0.606966
| 722
| 5,656
| 4.686981
| 0.229917
| 0.074468
| 0.026596
| 0.035461
| 0.87766
| 0.87766
| 0.87766
| 0.868203
| 0.868203
| 0.868203
| 0
| 0.006061
| 0.329031
| 5,656
| 154
| 79
| 36.727273
| 0.885639
| 0.056047
| 0
| 0.495652
| 0
| 0
| 0.669732
| 0
| 0
| 0
| 0
| 0
| 0.052174
| 1
| 0.052174
| false
| 0
| 0.026087
| 0
| 0.095652
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2ef38f8236d5e2607f9f4afdd01587fd16fa3211
| 69
|
py
|
Python
|
image_text_contrastive/__init__.py
|
amsword/image_text_contrastive
|
8ba34b46355117c11004d0103790dd8e47afea02
|
[
"MIT"
] | null | null | null |
image_text_contrastive/__init__.py
|
amsword/image_text_contrastive
|
8ba34b46355117c11004d0103790dd8e47afea02
|
[
"MIT"
] | null | null | null |
image_text_contrastive/__init__.py
|
amsword/image_text_contrastive
|
8ba34b46355117c11004d0103790dd8e47afea02
|
[
"MIT"
] | null | null | null |
from .image_text_contrastive_loss import image_text_contrastive_loss
| 34.5
| 68
| 0.927536
| 10
| 69
| 5.8
| 0.6
| 0.310345
| 0.689655
| 0.827586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057971
| 69
| 1
| 69
| 69
| 0.892308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
2c18fe82b2a6662f52d27d37d5380cfec8371291
| 78,024
|
py
|
Python
|
alien4cloud-cloudify3-provider/src/test/resources/outputs/blueprints/amazon/lamp/plugins/custom_wf_plugin/plugin/workflows.py
|
alien4cloud/alien4cloud-cloudify4-provider
|
97faee855255eb0c3ce25bb3075c29acd11a63c5
|
[
"Apache-2.0"
] | null | null | null |
alien4cloud-cloudify3-provider/src/test/resources/outputs/blueprints/amazon/lamp/plugins/custom_wf_plugin/plugin/workflows.py
|
alien4cloud/alien4cloud-cloudify4-provider
|
97faee855255eb0c3ce25bb3075c29acd11a63c5
|
[
"Apache-2.0"
] | 3
|
2015-12-04T15:27:22.000Z
|
2016-04-08T11:32:43.000Z
|
alien4cloud-cloudify3-provider/src/test/resources/outputs/blueprints/amazon/lamp/plugins/custom_wf_plugin/plugin/workflows.py
|
alien4cloud/alien4cloud-cloudify4-provider
|
97faee855255eb0c3ce25bb3075c29acd11a63c5
|
[
"Apache-2.0"
] | 16
|
2015-01-29T10:05:09.000Z
|
2019-06-24T19:23:54.000Z
|
from cloudify.decorators import workflow
from cloudify.workflows import ctx
from cloudify.workflows import tasks as workflow_tasks
from utils import set_state_task
from utils import operation_task
from utils import link_tasks
from utils import CustomContext
from utils import generate_native_node_workflows
from utils import _get_all_nodes
from utils import _get_all_nodes_instances
from utils import _get_all_modified_node_instances
from utils import is_host_node
from utils import is_kubernetes_node
from utils import relationship_operation_task
from workflow import WfStartEvent
from workflow import build_pre_event
# Subworkflow for host
def install_host_server(ctx, graph, custom_context):
    """Build the 'install' sub-workflow for nodes hosted on the 'Server' compute.

    Creates one graph task per lifecycle step (set-state, operation,
    relationship operation) for the Apache, PHP and Wordpress nodes, then
    links tasks to enforce execution ordering.  Auto-generated code: the
    statement order is significant — do not edit by hand.

    :param ctx: Cloudify workflow context.
    :param graph: workflow task graph being populated.
    :param custom_context: utils.CustomContext tracking customized workflow nodes.
    """
    # Register nodes whose workflow is customized.  The generator emits one
    # call per generated step, so the same node appears many times —
    # presumably add_customized_wf_node is idempotent; TODO confirm in utils.
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('PHP')
    # Create the graph tasks for every lifecycle step and relationship
    # operation of the hosted nodes (Apache, PHP, Wordpress).
    set_state_task(ctx, graph, 'Apache', 'created', 'Apache_created', custom_context)
    relationship_operation_task(ctx, graph, 'PHP', 'Server', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'SOURCE', 'PHP_host_pre_configure_source', custom_context)
    set_state_task(ctx, graph, 'Apache', 'started', 'Apache_started', custom_context)
    set_state_task(ctx, graph, 'Apache', 'configured', 'Apache_configured', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'TARGET', 'Wordpress_database_post_configure_target', custom_context)
    relationship_operation_task(ctx, graph, 'PHP', 'Server', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'PHP_host_add_target', custom_context)
    operation_task(ctx, graph, 'Wordpress', 'cloudify.interfaces.lifecycle.start', 'Wordpress_start', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'SOURCE', 'Wordpress_database_post_configure_source', custom_context)
    operation_task(ctx, graph, 'PHP', 'cloudify.interfaces.lifecycle.configure', 'PHP_configure', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Apache', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'Wordpress_host_add_target', custom_context)
    operation_task(ctx, graph, 'Wordpress', 'cloudify.interfaces.lifecycle.configure', 'Wordpress_configure', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Apache', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'SOURCE', 'Wordpress_host_pre_configure_source', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'created', 'Wordpress_created', custom_context)
    relationship_operation_task(ctx, graph, 'Apache', 'Server', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'SOURCE', 'Apache_host_post_configure_source', custom_context)
    set_state_task(ctx, graph, 'Apache', 'initial', 'Apache_initial', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'PHP', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'Wordpress_php_add_target', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'starting', 'Wordpress_starting', custom_context)
    set_state_task(ctx, graph, 'Apache', 'configuring', 'Apache_configuring', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'creating', 'Wordpress_creating', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.establish', 'TARGET', 'Wordpress_database_add_source', custom_context)
    operation_task(ctx, graph, 'Apache', 'cloudify.interfaces.lifecycle.configure', 'Apache_configure', custom_context)
    operation_task(ctx, graph, 'PHP', 'cloudify.interfaces.lifecycle.create', 'PHP_create', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Apache', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'TARGET', 'Wordpress_host_post_configure_target', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'started', 'Wordpress_started', custom_context)
    set_state_task(ctx, graph, 'Apache', 'starting', 'Apache_starting', custom_context)
    set_state_task(ctx, graph, 'PHP', 'configuring', 'PHP_configuring', custom_context)
    set_state_task(ctx, graph, 'PHP', 'configured', 'PHP_configured', custom_context)
    relationship_operation_task(ctx, graph, 'Apache', 'Server', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'SOURCE', 'Apache_host_pre_configure_source', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'Wordpress_database_add_target', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Apache', 'cloudify.interfaces.relationship_lifecycle.establish', 'TARGET', 'Wordpress_host_add_source', custom_context)
    relationship_operation_task(ctx, graph, 'PHP', 'Server', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'SOURCE', 'PHP_host_post_configure_source', custom_context)
    set_state_task(ctx, graph, 'Apache', 'creating', 'Apache_creating', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'initial', 'Wordpress_initial', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'configuring', 'Wordpress_configuring', custom_context)
    operation_task(ctx, graph, 'Apache', 'cloudify.interfaces.lifecycle.start', 'Apache_start', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'configured', 'Wordpress_configured', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'PHP', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'SOURCE', 'Wordpress_php_pre_configure_source', custom_context)
    operation_task(ctx, graph, 'Wordpress', 'cloudify.interfaces.lifecycle.create', 'Wordpress_create', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'PHP', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'TARGET', 'Wordpress_php_pre_configure_target', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'TARGET', 'Wordpress_database_pre_configure_target', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'PHP', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'SOURCE', 'Wordpress_php_post_configure_source', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Apache', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'SOURCE', 'Wordpress_host_post_configure_source', custom_context)
    set_state_task(ctx, graph, 'PHP', 'created', 'PHP_created', custom_context)
    set_state_task(ctx, graph, 'PHP', 'started', 'PHP_started', custom_context)
    operation_task(ctx, graph, 'Apache', 'cloudify.interfaces.lifecycle.create', 'Apache_create', custom_context)
    operation_task(ctx, graph, 'PHP', 'cloudify.interfaces.lifecycle.start', 'PHP_start', custom_context)
    set_state_task(ctx, graph, 'PHP', 'starting', 'PHP_starting', custom_context)
    relationship_operation_task(ctx, graph, 'Apache', 'Server', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'Apache_host_add_target', custom_context)
    set_state_task(ctx, graph, 'PHP', 'initial', 'PHP_initial', custom_context)
    set_state_task(ctx, graph, 'PHP', 'creating', 'PHP_creating', custom_context)
    # The 'Server' compute itself is a native node handled by a delegate step.
    custom_context.register_native_delegate_wf_step('Server', 'Server_install')
    relationship_operation_task(ctx, graph, 'Wordpress', 'Apache', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'TARGET', 'Wordpress_host_pre_configure_target', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'PHP', 'cloudify.interfaces.relationship_lifecycle.establish', 'TARGET', 'Wordpress_php_add_source', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'PHP', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'TARGET', 'Wordpress_php_post_configure_target', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'SOURCE', 'Wordpress_database_pre_configure_source', custom_context)
    operation_task(ctx, graph, 'PHP', 'org.alien4cloud.interfaces.cfy.lifecycle.NodeInit', '_a4c_init_PHP', custom_context)
    operation_task(ctx, graph, 'Wordpress', 'org.alien4cloud.interfaces.cfy.lifecycle.NodeInit', '_a4c_init_Wordpress', custom_context)
    operation_task(ctx, graph, 'Apache', 'org.alien4cloud.interfaces.cfy.lifecycle.NodeInit', '_a4c_init_Apache', custom_context)
    # Generate the delegate workflows for native nodes, then wire ordering
    # between all tasks.  Argument order appears to be
    # link_tasks(graph, after, before, ...) -- TODO confirm in utils.link_tasks.
    generate_native_node_workflows(ctx, graph, custom_context, 'install')
    link_tasks(graph, 'Apache_configuring', 'Apache_created', custom_context)
    link_tasks(graph, 'PHP_configure', 'PHP_host_pre_configure_source', custom_context)
    link_tasks(graph, 'Wordpress_initial', 'Apache_started', custom_context)
    link_tasks(graph, 'Apache_host_add_target', 'Apache_started', custom_context)
    link_tasks(graph, 'Apache_starting', 'Apache_configured', custom_context)
    link_tasks(graph, 'Wordpress_started', 'Wordpress_start', custom_context)
    link_tasks(graph, 'Wordpress_configured', 'Wordpress_database_post_configure_source', custom_context)
    link_tasks(graph, 'PHP_host_post_configure_source', 'PHP_configure', custom_context)
    link_tasks(graph, 'Wordpress_php_post_configure_target', 'PHP_configure', custom_context)
    link_tasks(graph, 'PHP_configured', 'PHP_configure', custom_context)
    link_tasks(graph, 'Wordpress_host_post_configure_source', 'Wordpress_configure', custom_context)
    link_tasks(graph, 'Wordpress_php_post_configure_source', 'Wordpress_configure', custom_context)
    link_tasks(graph, 'Wordpress_configured', 'Wordpress_configure', custom_context)
    link_tasks(graph, 'Wordpress_database_post_configure_source', 'Wordpress_configure', custom_context)
    link_tasks(graph, 'Wordpress_configure', 'Wordpress_host_pre_configure_source', custom_context)
    link_tasks(graph, 'Wordpress_configuring', 'Wordpress_created', custom_context)
    link_tasks(graph, 'Apache_configured', 'Apache_host_post_configure_source', custom_context)
    link_tasks(graph, 'Apache_creating', 'Apache_initial', custom_context)
    link_tasks(graph, 'Wordpress_start', 'Wordpress_starting', custom_context)
    link_tasks(graph, 'Wordpress_host_pre_configure_target', 'Apache_configuring', custom_context)
    link_tasks(graph, 'Apache_configure', 'Apache_configuring', custom_context)
    link_tasks(graph, 'Apache_host_pre_configure_source', 'Apache_configuring', custom_context)
    link_tasks(graph, '_a4c_init_Wordpress', 'Wordpress_creating', custom_context)
    link_tasks(graph, 'Apache_host_post_configure_source', 'Apache_configure', custom_context)
    link_tasks(graph, 'Apache_configured', 'Apache_configure', custom_context)
    link_tasks(graph, 'Wordpress_host_post_configure_target', 'Apache_configure', custom_context)
    link_tasks(graph, 'PHP_created', 'PHP_create', custom_context)
    link_tasks(graph, 'Apache_configured', 'Wordpress_host_post_configure_target', custom_context)
    link_tasks(graph, 'Wordpress_host_add_target', 'Wordpress_started', custom_context)
    link_tasks(graph, 'Wordpress_database_add_source', 'Wordpress_started', custom_context)
    link_tasks(graph, 'Wordpress_php_add_source', 'Wordpress_started', custom_context)
    link_tasks(graph, 'Wordpress_database_add_target', 'Wordpress_started', custom_context)
    link_tasks(graph, 'Wordpress_php_add_target', 'Wordpress_started', custom_context)
    link_tasks(graph, 'Wordpress_host_add_source', 'Wordpress_started', custom_context)
    link_tasks(graph, 'Apache_start', 'Apache_starting', custom_context)
    link_tasks(graph, 'PHP_host_pre_configure_source', 'PHP_configuring', custom_context)
    link_tasks(graph, 'PHP_configure', 'PHP_configuring', custom_context)
    link_tasks(graph, 'Wordpress_php_pre_configure_target', 'PHP_configuring', custom_context)
    link_tasks(graph, 'PHP_starting', 'PHP_configured', custom_context)
    link_tasks(graph, 'Apache_configure', 'Apache_host_pre_configure_source', custom_context)
    link_tasks(graph, 'PHP_configured', 'PHP_host_post_configure_source', custom_context)
    link_tasks(graph, '_a4c_init_Apache', 'Apache_creating', custom_context)
    link_tasks(graph, 'Wordpress_creating', 'Wordpress_initial', custom_context)
    link_tasks(graph, 'Wordpress_php_pre_configure_source', 'Wordpress_configuring', custom_context)
    link_tasks(graph, 'Wordpress_configure', 'Wordpress_configuring', custom_context)
    link_tasks(graph, 'Wordpress_host_pre_configure_source', 'Wordpress_configuring', custom_context)
    link_tasks(graph, 'Wordpress_database_pre_configure_source', 'Wordpress_configuring', custom_context)
    link_tasks(graph, 'Apache_started', 'Apache_start', custom_context)
    link_tasks(graph, 'Wordpress_starting', 'Wordpress_configured', custom_context)
    link_tasks(graph, 'Wordpress_configure', 'Wordpress_php_pre_configure_source', custom_context)
    link_tasks(graph, 'Wordpress_created', 'Wordpress_create', custom_context)
    link_tasks(graph, 'PHP_configure', 'Wordpress_php_pre_configure_target', custom_context)
    link_tasks(graph, 'Wordpress_configured', 'Wordpress_php_post_configure_source', custom_context)
    link_tasks(graph, 'Wordpress_configured', 'Wordpress_host_post_configure_source', custom_context)
    link_tasks(graph, 'PHP_configuring', 'PHP_created', custom_context)
    link_tasks(graph, 'PHP_host_add_target', 'PHP_started', custom_context)
    link_tasks(graph, 'Wordpress_initial', 'PHP_started', custom_context)
    link_tasks(graph, 'Apache_created', 'Apache_create', custom_context)
    link_tasks(graph, 'PHP_started', 'PHP_start', custom_context)
    link_tasks(graph, 'PHP_start', 'PHP_starting', custom_context)
    link_tasks(graph, 'PHP_creating', 'PHP_initial', custom_context)
    link_tasks(graph, '_a4c_init_PHP', 'PHP_creating', custom_context)
    link_tasks(graph, 'Apache_initial', 'Server_install', custom_context)
    link_tasks(graph, 'PHP_initial', 'Server_install', custom_context)
    link_tasks(graph, 'Apache_configure', 'Wordpress_host_pre_configure_target', custom_context)
    link_tasks(graph, 'PHP_configured', 'Wordpress_php_post_configure_target', custom_context)
    link_tasks(graph, 'Wordpress_configure', 'Wordpress_database_pre_configure_source', custom_context)
    link_tasks(graph, 'PHP_create', '_a4c_init_PHP', custom_context)
    link_tasks(graph, 'Wordpress_create', '_a4c_init_Wordpress', custom_context)
    link_tasks(graph, 'Apache_create', '_a4c_init_Apache', custom_context)
# Subworkflow for host
def install_host_database(ctx, graph, custom_context):
    """Build the 'install' sub-workflow for nodes hosted on the 'DataBase' compute.

    Creates lifecycle tasks for the Mysql node (plus the Wordpress-side
    relationship operations that involve Mysql), then links tasks to enforce
    ordering.  Auto-generated code: the statement order is significant —
    do not edit by hand.

    :param ctx: Cloudify workflow context.
    :param graph: workflow task graph being populated.
    :param custom_context: utils.CustomContext tracking customized workflow nodes.
    """
    # Register the customized node (one call per generated step; presumably
    # add_customized_wf_node is idempotent -- TODO confirm in utils).
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('Mysql')
    # Create the graph tasks for Mysql's lifecycle and its relationships.
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'TARGET', 'Wordpress_database_post_configure_target', custom_context)
    operation_task(ctx, graph, 'Mysql', 'cloudify.interfaces.lifecycle.configure', 'Mysql_configure', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'SOURCE', 'Wordpress_database_post_configure_source', custom_context)
    operation_task(ctx, graph, 'Mysql', 'cloudify.interfaces.lifecycle.start', 'Mysql_start', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'initial', 'Mysql_initial', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'creating', 'Mysql_creating', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'configuring', 'Mysql_configuring', custom_context)
    relationship_operation_task(ctx, graph, 'Mysql', 'DataBase', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'Mysql_host_add_target', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'starting', 'Mysql_starting', custom_context)
    # The 'DataBase' compute itself is a native node handled by a delegate step.
    custom_context.register_native_delegate_wf_step('DataBase', 'DataBase_install')
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.establish', 'TARGET', 'Wordpress_database_add_source', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'started', 'Mysql_started', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'Wordpress_database_add_target', custom_context)
    relationship_operation_task(ctx, graph, 'Mysql', 'DataBase', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'SOURCE', 'Mysql_host_pre_configure_source', custom_context)
    operation_task(ctx, graph, 'Mysql', 'cloudify.interfaces.lifecycle.create', 'Mysql_create', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'TARGET', 'Wordpress_database_pre_configure_target', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'configured', 'Mysql_configured', custom_context)
    relationship_operation_task(ctx, graph, 'Mysql', 'DataBase', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'SOURCE', 'Mysql_host_post_configure_source', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'created', 'Mysql_created', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'SOURCE', 'Wordpress_database_pre_configure_source', custom_context)
    operation_task(ctx, graph, 'Mysql', 'org.alien4cloud.interfaces.cfy.lifecycle.NodeInit', '_a4c_init_Mysql', custom_context)
    # Generate delegate workflows for native nodes, then wire ordering.
    # Argument order appears to be link_tasks(graph, after, before, ...) --
    # TODO confirm in utils.link_tasks.
    generate_native_node_workflows(ctx, graph, custom_context, 'install')
    link_tasks(graph, 'Mysql_configured', 'Wordpress_database_post_configure_target', custom_context)
    link_tasks(graph, 'Mysql_configured', 'Mysql_configure', custom_context)
    link_tasks(graph, 'Mysql_host_post_configure_source', 'Mysql_configure', custom_context)
    link_tasks(graph, 'Wordpress_database_post_configure_target', 'Mysql_configure', custom_context)
    link_tasks(graph, 'Mysql_started', 'Mysql_start', custom_context)
    link_tasks(graph, 'Mysql_creating', 'Mysql_initial', custom_context)
    link_tasks(graph, '_a4c_init_Mysql', 'Mysql_creating', custom_context)
    link_tasks(graph, 'Wordpress_database_pre_configure_target', 'Mysql_configuring', custom_context)
    link_tasks(graph, 'Mysql_configure', 'Mysql_configuring', custom_context)
    link_tasks(graph, 'Mysql_host_pre_configure_source', 'Mysql_configuring', custom_context)
    link_tasks(graph, 'Mysql_start', 'Mysql_starting', custom_context)
    link_tasks(graph, 'Mysql_initial', 'DataBase_install', custom_context)
    link_tasks(graph, 'Mysql_host_add_target', 'Mysql_started', custom_context)
    link_tasks(graph, 'Mysql_configure', 'Mysql_host_pre_configure_source', custom_context)
    link_tasks(graph, 'Mysql_created', 'Mysql_create', custom_context)
    link_tasks(graph, 'Mysql_configure', 'Wordpress_database_pre_configure_target', custom_context)
    link_tasks(graph, 'Mysql_starting', 'Mysql_configured', custom_context)
    link_tasks(graph, 'Mysql_configured', 'Mysql_host_post_configure_source', custom_context)
    link_tasks(graph, 'Mysql_configuring', 'Mysql_created', custom_context)
    link_tasks(graph, 'Mysql_create', '_a4c_init_Mysql', custom_context)
# Subworkflow for host
def install_host_netpub(ctx, graph, custom_context):
    """Install sub-workflow for the 'NetPub' host.

    NetPub carries no customized lifecycle tasks: it is a native node whose
    install is delegated to a generated workflow step.
    """
    native_delegates = (('NetPub', 'NetPub_install'),)
    for node_id, step_id in native_delegates:
        custom_context.register_native_delegate_wf_step(node_id, step_id)
    generate_native_node_workflows(ctx, graph, custom_context, 'install')
# Subworkflow for host
def uninstall_host_server(ctx, graph, custom_context):
    """Build the 'uninstall' sub-workflow for nodes hosted on the 'Server' compute.

    Creates stop/delete lifecycle tasks and unlink relationship tasks for the
    Apache, PHP and Wordpress nodes, then links tasks to enforce ordering.
    Auto-generated code: the statement order is significant — do not edit by hand.

    :param ctx: Cloudify workflow context.
    :param graph: workflow task graph being populated.
    :param custom_context: utils.CustomContext tracking customized workflow nodes.
    """
    # Register customized nodes (duplicates emitted by the generator;
    # presumably add_customized_wf_node is idempotent -- TODO confirm in utils).
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('Apache')
    # Create the stop/delete and unlink tasks.
    relationship_operation_task(ctx, graph, 'Wordpress', 'PHP', 'cloudify.interfaces.relationship_lifecycle.unlink', 'TARGET', 'Wordpress_php_remove_source', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'deleted', 'Wordpress_deleted', custom_context)
    set_state_task(ctx, graph, 'Apache', 'stopped', 'Apache_stopped', custom_context)
    set_state_task(ctx, graph, 'Apache', 'stopping', 'Apache_stopping', custom_context)
    operation_task(ctx, graph, 'Wordpress', 'cloudify.interfaces.lifecycle.stop', 'Wordpress_stop', custom_context)
    relationship_operation_task(ctx, graph, 'Apache', 'Server', 'cloudify.interfaces.relationship_lifecycle.unlink', 'SOURCE', 'Apache_host_remove_target', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'PHP', 'cloudify.interfaces.relationship_lifecycle.unlink', 'SOURCE', 'Wordpress_php_remove_target', custom_context)
    set_state_task(ctx, graph, 'PHP', 'stopping', 'PHP_stopping', custom_context)
    set_state_task(ctx, graph, 'PHP', 'deleted', 'PHP_deleted', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'stopped', 'Wordpress_stopped', custom_context)
    operation_task(ctx, graph, 'PHP', 'cloudify.interfaces.lifecycle.delete', 'PHP_delete', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Apache', 'cloudify.interfaces.relationship_lifecycle.unlink', 'SOURCE', 'Wordpress_host_remove_target', custom_context)
    operation_task(ctx, graph, 'Apache', 'cloudify.interfaces.lifecycle.delete', 'Apache_delete', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'deleting', 'Wordpress_deleting', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.unlink', 'TARGET', 'Wordpress_database_remove_source', custom_context)
    operation_task(ctx, graph, 'PHP', 'cloudify.interfaces.lifecycle.stop', 'PHP_stop', custom_context)
    set_state_task(ctx, graph, 'Apache', 'deleting', 'Apache_deleting', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.unlink', 'SOURCE', 'Wordpress_database_remove_target', custom_context)
    relationship_operation_task(ctx, graph, 'PHP', 'Server', 'cloudify.interfaces.relationship_lifecycle.unlink', 'SOURCE', 'PHP_host_remove_target', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'stopping', 'Wordpress_stopping', custom_context)
    set_state_task(ctx, graph, 'PHP', 'deleting', 'PHP_deleting', custom_context)
    set_state_task(ctx, graph, 'PHP', 'stopped', 'PHP_stopped', custom_context)
    # The 'Server' compute uninstall is delegated to a native workflow step.
    custom_context.register_native_delegate_wf_step('Server', 'Server_uninstall')
    relationship_operation_task(ctx, graph, 'Wordpress', 'Apache', 'cloudify.interfaces.relationship_lifecycle.unlink', 'TARGET', 'Wordpress_host_remove_source', custom_context)
    operation_task(ctx, graph, 'Apache', 'cloudify.interfaces.lifecycle.stop', 'Apache_stop', custom_context)
    set_state_task(ctx, graph, 'Apache', 'deleted', 'Apache_deleted', custom_context)
    operation_task(ctx, graph, 'Wordpress', 'cloudify.interfaces.lifecycle.delete', 'Wordpress_delete', custom_context)
    # Generate delegate workflows for native nodes, then wire ordering.
    # Argument order appears to be link_tasks(graph, after, before, ...) --
    # TODO confirm in utils.link_tasks.
    generate_native_node_workflows(ctx, graph, custom_context, 'uninstall')
    link_tasks(graph, 'Wordpress_stopping', 'Wordpress_php_remove_source', custom_context)
    link_tasks(graph, 'Apache_stopping', 'Wordpress_deleted', custom_context)
    link_tasks(graph, 'PHP_stopping', 'Wordpress_deleted', custom_context)
    link_tasks(graph, 'Apache_deleting', 'Apache_stopped', custom_context)
    link_tasks(graph, 'Apache_stop', 'Apache_stopping', custom_context)
    link_tasks(graph, 'Wordpress_stopped', 'Wordpress_stop', custom_context)
    link_tasks(graph, 'Apache_stopping', 'Apache_host_remove_target', custom_context)
    link_tasks(graph, 'Wordpress_stopping', 'Wordpress_php_remove_target', custom_context)
    link_tasks(graph, 'PHP_stop', 'PHP_stopping', custom_context)
    link_tasks(graph, 'Server_uninstall', 'PHP_deleted', custom_context)
    link_tasks(graph, 'Wordpress_deleting', 'Wordpress_stopped', custom_context)
    link_tasks(graph, 'PHP_deleted', 'PHP_delete', custom_context)
    link_tasks(graph, 'Wordpress_stopping', 'Wordpress_host_remove_target', custom_context)
    link_tasks(graph, 'Apache_deleted', 'Apache_delete', custom_context)
    link_tasks(graph, 'Wordpress_delete', 'Wordpress_deleting', custom_context)
    link_tasks(graph, 'Wordpress_stopping', 'Wordpress_database_remove_source', custom_context)
    link_tasks(graph, 'PHP_stopped', 'PHP_stop', custom_context)
    link_tasks(graph, 'Apache_delete', 'Apache_deleting', custom_context)
    link_tasks(graph, 'Wordpress_stopping', 'Wordpress_database_remove_target', custom_context)
    link_tasks(graph, 'PHP_stopping', 'PHP_host_remove_target', custom_context)
    link_tasks(graph, 'Wordpress_stop', 'Wordpress_stopping', custom_context)
    link_tasks(graph, 'PHP_delete', 'PHP_deleting', custom_context)
    link_tasks(graph, 'PHP_deleting', 'PHP_stopped', custom_context)
    link_tasks(graph, 'Wordpress_stopping', 'Wordpress_host_remove_source', custom_context)
    link_tasks(graph, 'Apache_stopped', 'Apache_stop', custom_context)
    link_tasks(graph, 'Server_uninstall', 'Apache_deleted', custom_context)
    link_tasks(graph, 'Wordpress_deleted', 'Wordpress_delete', custom_context)
# Subworkflow for host
def uninstall_host_database(ctx, graph, custom_context):
    """Build the 'uninstall' sub-workflow for nodes hosted on the 'DataBase' compute.

    Creates stop/delete lifecycle tasks for Mysql plus the Wordpress-side
    unlink operations, then links tasks to enforce ordering.  Auto-generated
    code: the statement order is significant — do not edit by hand.

    :param ctx: Cloudify workflow context.
    :param graph: workflow task graph being populated.
    :param custom_context: utils.CustomContext tracking customized workflow nodes.
    """
    # Register the customized node (duplicates emitted by the generator;
    # presumably add_customized_wf_node is idempotent -- TODO confirm in utils).
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('Mysql')
    # Create the stop/delete and unlink tasks.
    operation_task(ctx, graph, 'Mysql', 'cloudify.interfaces.lifecycle.delete', 'Mysql_delete', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'stopping', 'Mysql_stopping', custom_context)
    relationship_operation_task(ctx, graph, 'Mysql', 'DataBase', 'cloudify.interfaces.relationship_lifecycle.unlink', 'SOURCE', 'Mysql_host_remove_target', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.unlink', 'TARGET', 'Wordpress_database_remove_source', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.unlink', 'SOURCE', 'Wordpress_database_remove_target', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'deleting', 'Mysql_deleting', custom_context)
    operation_task(ctx, graph, 'Mysql', 'cloudify.interfaces.lifecycle.stop', 'Mysql_stop', custom_context)
    # The 'DataBase' compute uninstall is delegated to a native workflow step.
    custom_context.register_native_delegate_wf_step('DataBase', 'DataBase_uninstall')
    set_state_task(ctx, graph, 'Mysql', 'stopped', 'Mysql_stopped', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'deleted', 'Mysql_deleted', custom_context)
    # Generate delegate workflows for native nodes, then wire ordering.
    generate_native_node_workflows(ctx, graph, custom_context, 'uninstall')
    link_tasks(graph, 'Mysql_deleted', 'Mysql_delete', custom_context)
    link_tasks(graph, 'Mysql_stop', 'Mysql_stopping', custom_context)
    link_tasks(graph, 'Mysql_stopping', 'Mysql_host_remove_target', custom_context)
    link_tasks(graph, 'Mysql_delete', 'Mysql_deleting', custom_context)
    link_tasks(graph, 'Mysql_stopped', 'Mysql_stop', custom_context)
    link_tasks(graph, 'Mysql_deleting', 'Mysql_stopped', custom_context)
    link_tasks(graph, 'DataBase_uninstall', 'Mysql_deleted', custom_context)
# Subworkflow for host
def uninstall_host_netpub(ctx, graph, custom_context):
    """Uninstall sub-workflow for the 'NetPub' host.

    NetPub carries no customized lifecycle tasks: it is a native node whose
    uninstall is delegated to a generated workflow step.
    """
    native_delegates = (('NetPub', 'NetPub_uninstall'),)
    for node_id, step_id in native_delegates:
        custom_context.register_native_delegate_wf_step(node_id, step_id)
    generate_native_node_workflows(ctx, graph, custom_context, 'uninstall')
def install_host(ctx, graph, custom_context, compute):
    """Dispatch to the install sub-workflow of the given compute node.

    :param compute: compute node id; one of 'Server', 'DataBase', 'NetPub'.
    :raises KeyError: if *compute* is not a known compute node id.
    """
    handlers = {
        'Server': install_host_server,
        'DataBase': install_host_database,
        'NetPub': install_host_netpub,
    }
    handlers[compute](ctx, graph, custom_context)
def uninstall_host(ctx, graph, custom_context, compute):
    """Dispatch to the uninstall sub-workflow of the given compute node.

    :param compute: compute node id; one of 'Server', 'DataBase', 'NetPub'.
    :raises KeyError: if *compute* is not a known compute node id.
    """
    handlers = {
        'Server': uninstall_host_server,
        'DataBase': uninstall_host_database,
        'NetPub': uninstall_host_netpub,
    }
    handlers[compute](ctx, graph, custom_context)
def _build_workflow_context(workflow_name):
    """Shared setup for every a4c workflow entry point.

    Builds the task graph and the CustomContext from all nodes/instances of
    the deployment, and emits the 'a4c_workflow_started' event before any
    task is created.  Uses the module-level cloudify workflow ``ctx``.

    :param workflow_name: workflow name reported in the start event
        ('install', 'uninstall', 'start' or 'stop').
    :return: ``(graph, custom_context)`` tuple.
    """
    graph = ctx.graph_mode()
    nodes = _get_all_nodes(ctx)
    instances = _get_all_nodes_instances(ctx)
    custom_context = CustomContext(ctx, instances, nodes)
    ctx.internal.send_workflow_event(event_type='a4c_workflow_started', message=build_pre_event(WfStartEvent(workflow_name)))
    return graph, custom_context
@workflow
def a4c_install(**kwargs):
    """Workflow entry point: install the deployment."""
    graph, custom_context = _build_workflow_context('install')
    _a4c_install(ctx, graph, custom_context)
    return graph.execute()
@workflow
def a4c_uninstall(**kwargs):
    """Workflow entry point: uninstall the deployment."""
    graph, custom_context = _build_workflow_context('uninstall')
    _a4c_uninstall(ctx, graph, custom_context)
    return graph.execute()
@workflow
def a4c_start(**kwargs):
    """Workflow entry point: start the deployment's nodes."""
    graph, custom_context = _build_workflow_context('start')
    _a4c_start(ctx, graph, custom_context)
    return graph.execute()
@workflow
def a4c_stop(**kwargs):
    """Workflow entry point: stop the deployment's nodes."""
    graph, custom_context = _build_workflow_context('stop')
    _a4c_stop(ctx, graph, custom_context)
    return graph.execute()
def _a4c_install(ctx, graph, custom_context):
    """Populate *graph* with the tasks of the generated 'install' workflow.

    Generated (alien4cloud) code: registers the customized workflow nodes,
    declares every set-state / operation / relationship-operation task and
    the delegate steps for the native nodes, then wires the ordering
    constraints between tasks with link_tasks.
    """
    # following code can be pasted in src/test/python/workflows/tasks.py for simulation
    # Register nodes whose workflow is customized. Duplicate registrations
    # appear in the generated output — presumably idempotent (TODO confirm).
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('Mysql')
    # Declare the lifecycle tasks (set-state / operation / relationship
    # operation) and the native delegate steps (NetPub, DataBase, Server).
    set_state_task(ctx, graph, 'Apache', 'created', 'Apache_created', custom_context)
    relationship_operation_task(ctx, graph, 'PHP', 'Server', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'SOURCE', 'PHP_host_pre_configure_source', custom_context)
    set_state_task(ctx, graph, 'Apache', 'started', 'Apache_started', custom_context)
    set_state_task(ctx, graph, 'Apache', 'configured', 'Apache_configured', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'TARGET', 'Wordpress_database_post_configure_target', custom_context)
    relationship_operation_task(ctx, graph, 'PHP', 'Server', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'PHP_host_add_target', custom_context)
    operation_task(ctx, graph, 'Wordpress', 'cloudify.interfaces.lifecycle.start', 'Wordpress_start', custom_context)
    operation_task(ctx, graph, 'Mysql', 'cloudify.interfaces.lifecycle.configure', 'Mysql_configure', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'SOURCE', 'Wordpress_database_post_configure_source', custom_context)
    operation_task(ctx, graph, 'Mysql', 'cloudify.interfaces.lifecycle.start', 'Mysql_start', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'initial', 'Mysql_initial', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'creating', 'Mysql_creating', custom_context)
    operation_task(ctx, graph, 'PHP', 'cloudify.interfaces.lifecycle.configure', 'PHP_configure', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Apache', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'Wordpress_host_add_target', custom_context)
    operation_task(ctx, graph, 'Wordpress', 'cloudify.interfaces.lifecycle.configure', 'Wordpress_configure', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Apache', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'SOURCE', 'Wordpress_host_pre_configure_source', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'configuring', 'Mysql_configuring', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'created', 'Wordpress_created', custom_context)
    relationship_operation_task(ctx, graph, 'Apache', 'Server', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'SOURCE', 'Apache_host_post_configure_source', custom_context)
    set_state_task(ctx, graph, 'Apache', 'initial', 'Apache_initial', custom_context)
    relationship_operation_task(ctx, graph, 'Mysql', 'DataBase', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'Mysql_host_add_target', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'starting', 'Mysql_starting', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'PHP', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'Wordpress_php_add_target', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'starting', 'Wordpress_starting', custom_context)
    custom_context.register_native_delegate_wf_step('NetPub', 'NetPub_install')
    set_state_task(ctx, graph, 'Apache', 'configuring', 'Apache_configuring', custom_context)
    custom_context.register_native_delegate_wf_step('DataBase', 'DataBase_install')
    set_state_task(ctx, graph, 'Wordpress', 'creating', 'Wordpress_creating', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.establish', 'TARGET', 'Wordpress_database_add_source', custom_context)
    operation_task(ctx, graph, 'Apache', 'cloudify.interfaces.lifecycle.configure', 'Apache_configure', custom_context)
    operation_task(ctx, graph, 'PHP', 'cloudify.interfaces.lifecycle.create', 'PHP_create', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'started', 'Mysql_started', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Apache', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'TARGET', 'Wordpress_host_post_configure_target', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'started', 'Wordpress_started', custom_context)
    set_state_task(ctx, graph, 'Apache', 'starting', 'Apache_starting', custom_context)
    set_state_task(ctx, graph, 'PHP', 'configuring', 'PHP_configuring', custom_context)
    set_state_task(ctx, graph, 'PHP', 'configured', 'PHP_configured', custom_context)
    relationship_operation_task(ctx, graph, 'Apache', 'Server', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'SOURCE', 'Apache_host_pre_configure_source', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'Wordpress_database_add_target', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Apache', 'cloudify.interfaces.relationship_lifecycle.establish', 'TARGET', 'Wordpress_host_add_source', custom_context)
    relationship_operation_task(ctx, graph, 'PHP', 'Server', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'SOURCE', 'PHP_host_post_configure_source', custom_context)
    set_state_task(ctx, graph, 'Apache', 'creating', 'Apache_creating', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'initial', 'Wordpress_initial', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'configuring', 'Wordpress_configuring', custom_context)
    operation_task(ctx, graph, 'Apache', 'cloudify.interfaces.lifecycle.start', 'Apache_start', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'configured', 'Wordpress_configured', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'PHP', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'SOURCE', 'Wordpress_php_pre_configure_source', custom_context)
    operation_task(ctx, graph, 'Wordpress', 'cloudify.interfaces.lifecycle.create', 'Wordpress_create', custom_context)
    relationship_operation_task(ctx, graph, 'Mysql', 'DataBase', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'SOURCE', 'Mysql_host_pre_configure_source', custom_context)
    operation_task(ctx, graph, 'Mysql', 'cloudify.interfaces.lifecycle.create', 'Mysql_create', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'PHP', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'TARGET', 'Wordpress_php_pre_configure_target', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'TARGET', 'Wordpress_database_pre_configure_target', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'PHP', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'SOURCE', 'Wordpress_php_post_configure_source', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'configured', 'Mysql_configured', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Apache', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'SOURCE', 'Wordpress_host_post_configure_source', custom_context)
    set_state_task(ctx, graph, 'PHP', 'created', 'PHP_created', custom_context)
    relationship_operation_task(ctx, graph, 'Mysql', 'DataBase', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'SOURCE', 'Mysql_host_post_configure_source', custom_context)
    set_state_task(ctx, graph, 'PHP', 'started', 'PHP_started', custom_context)
    operation_task(ctx, graph, 'Apache', 'cloudify.interfaces.lifecycle.create', 'Apache_create', custom_context)
    operation_task(ctx, graph, 'PHP', 'cloudify.interfaces.lifecycle.start', 'PHP_start', custom_context)
    set_state_task(ctx, graph, 'PHP', 'starting', 'PHP_starting', custom_context)
    relationship_operation_task(ctx, graph, 'Apache', 'Server', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'Apache_host_add_target', custom_context)
    set_state_task(ctx, graph, 'PHP', 'initial', 'PHP_initial', custom_context)
    set_state_task(ctx, graph, 'PHP', 'creating', 'PHP_creating', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'created', 'Mysql_created', custom_context)
    custom_context.register_native_delegate_wf_step('Server', 'Server_install')
    relationship_operation_task(ctx, graph, 'Wordpress', 'Apache', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'TARGET', 'Wordpress_host_pre_configure_target', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'PHP', 'cloudify.interfaces.relationship_lifecycle.establish', 'TARGET', 'Wordpress_php_add_source', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'PHP', 'cloudify.interfaces.relationship_lifecycle.postconfigure', 'TARGET', 'Wordpress_php_post_configure_target', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.preconfigure', 'SOURCE', 'Wordpress_database_pre_configure_source', custom_context)
    # alien4cloud node-init hooks, one per customized node.
    operation_task(ctx, graph, 'PHP', 'org.alien4cloud.interfaces.cfy.lifecycle.NodeInit', '_a4c_init_PHP', custom_context)
    operation_task(ctx, graph, 'Mysql', 'org.alien4cloud.interfaces.cfy.lifecycle.NodeInit', '_a4c_init_Mysql', custom_context)
    operation_task(ctx, graph, 'Wordpress', 'org.alien4cloud.interfaces.cfy.lifecycle.NodeInit', '_a4c_init_Wordpress', custom_context)
    operation_task(ctx, graph, 'Apache', 'org.alien4cloud.interfaces.cfy.lifecycle.NodeInit', '_a4c_init_Apache', custom_context)
    # Generate the delegate workflow steps of the native (infrastructure) nodes.
    generate_native_node_workflows(ctx, graph, custom_context, 'install')
    # Ordering constraints. NOTE(review): each call appears to make the
    # first-named task run after the second (e.g. 'Apache_created' follows
    # 'Apache_create') — confirm against link_tasks' definition.
    link_tasks(graph, 'Apache_created', 'Apache_create', custom_context)
    link_tasks(graph, 'PHP_host_pre_configure_source', 'PHP_configuring', custom_context)
    link_tasks(graph, 'Apache_started', 'Apache_start', custom_context)
    link_tasks(graph, 'Apache_configured', 'Apache_configure', custom_context)
    link_tasks(graph, 'Apache_configured', 'Apache_host_post_configure_source', custom_context)
    link_tasks(graph, 'Apache_configured', 'Wordpress_host_post_configure_target', custom_context)
    link_tasks(graph, 'Wordpress_database_post_configure_target', 'Mysql_configure', custom_context)
    link_tasks(graph, 'PHP_host_add_target', 'PHP_started', custom_context)
    link_tasks(graph, 'Wordpress_start', 'Wordpress_starting', custom_context)
    link_tasks(graph, 'Mysql_configure', 'Mysql_configuring', custom_context)
    link_tasks(graph, 'Mysql_configure', 'Wordpress_database_pre_configure_target', custom_context)
    link_tasks(graph, 'Mysql_configure', 'Mysql_host_pre_configure_source', custom_context)
    link_tasks(graph, 'Wordpress_database_post_configure_source', 'Wordpress_configure', custom_context)
    link_tasks(graph, 'Mysql_start', 'Mysql_starting', custom_context)
    link_tasks(graph, 'Mysql_initial', 'DataBase_install', custom_context)
    link_tasks(graph, 'Mysql_creating', 'Mysql_initial', custom_context)
    link_tasks(graph, 'PHP_configure', 'PHP_host_pre_configure_source', custom_context)
    link_tasks(graph, 'PHP_configure', 'Wordpress_php_pre_configure_target', custom_context)
    link_tasks(graph, 'PHP_configure', 'PHP_configuring', custom_context)
    link_tasks(graph, 'Wordpress_host_add_target', 'Wordpress_started', custom_context)
    link_tasks(graph, 'Wordpress_configure', 'Wordpress_php_pre_configure_source', custom_context)
    link_tasks(graph, 'Wordpress_configure', 'Wordpress_host_pre_configure_source', custom_context)
    link_tasks(graph, 'Wordpress_configure', 'Wordpress_configuring', custom_context)
    link_tasks(graph, 'Wordpress_configure', 'Wordpress_database_pre_configure_source', custom_context)
    link_tasks(graph, 'Wordpress_host_pre_configure_source', 'Wordpress_configuring', custom_context)
    link_tasks(graph, 'Mysql_configuring', 'Mysql_created', custom_context)
    link_tasks(graph, 'Wordpress_created', 'Wordpress_create', custom_context)
    link_tasks(graph, 'Apache_host_post_configure_source', 'Apache_configure', custom_context)
    link_tasks(graph, 'Apache_initial', 'Server_install', custom_context)
    link_tasks(graph, 'Mysql_host_add_target', 'Mysql_started', custom_context)
    link_tasks(graph, 'Mysql_starting', 'Mysql_configured', custom_context)
    link_tasks(graph, 'Wordpress_php_add_target', 'Wordpress_started', custom_context)
    link_tasks(graph, 'Wordpress_starting', 'Wordpress_configured', custom_context)
    link_tasks(graph, 'Apache_configuring', 'Apache_created', custom_context)
    link_tasks(graph, 'Wordpress_creating', 'Wordpress_initial', custom_context)
    link_tasks(graph, 'Wordpress_database_add_source', 'Wordpress_started', custom_context)
    link_tasks(graph, 'Apache_configure', 'Apache_configuring', custom_context)
    link_tasks(graph, 'Apache_configure', 'Wordpress_host_pre_configure_target', custom_context)
    link_tasks(graph, 'Apache_configure', 'Apache_host_pre_configure_source', custom_context)
    link_tasks(graph, 'PHP_create', '_a4c_init_PHP', custom_context)
    link_tasks(graph, 'Mysql_started', 'Mysql_start', custom_context)
    link_tasks(graph, 'Wordpress_host_post_configure_target', 'Apache_configure', custom_context)
    link_tasks(graph, 'Wordpress_started', 'Wordpress_start', custom_context)
    link_tasks(graph, 'Apache_starting', 'Apache_configured', custom_context)
    link_tasks(graph, 'PHP_configuring', 'PHP_created', custom_context)
    link_tasks(graph, 'PHP_configured', 'PHP_configure', custom_context)
    link_tasks(graph, 'PHP_configured', 'PHP_host_post_configure_source', custom_context)
    link_tasks(graph, 'PHP_configured', 'Wordpress_php_post_configure_target', custom_context)
    link_tasks(graph, 'Apache_host_pre_configure_source', 'Apache_configuring', custom_context)
    link_tasks(graph, 'Wordpress_database_add_target', 'Wordpress_started', custom_context)
    link_tasks(graph, 'Wordpress_host_add_source', 'Wordpress_started', custom_context)
    link_tasks(graph, 'PHP_host_post_configure_source', 'PHP_configure', custom_context)
    link_tasks(graph, 'Apache_creating', 'Apache_initial', custom_context)
    link_tasks(graph, 'Wordpress_initial', 'Apache_started', custom_context)
    link_tasks(graph, 'Wordpress_initial', 'Mysql_started', custom_context)
    link_tasks(graph, 'Wordpress_initial', 'PHP_started', custom_context)
    link_tasks(graph, 'Wordpress_configuring', 'Wordpress_created', custom_context)
    link_tasks(graph, 'Apache_start', 'Apache_starting', custom_context)
    link_tasks(graph, 'Wordpress_configured', 'Wordpress_host_post_configure_source', custom_context)
    link_tasks(graph, 'Wordpress_configured', 'Wordpress_configure', custom_context)
    link_tasks(graph, 'Wordpress_configured', 'Wordpress_php_post_configure_source', custom_context)
    link_tasks(graph, 'Wordpress_configured', 'Wordpress_database_post_configure_source', custom_context)
    link_tasks(graph, 'Wordpress_php_pre_configure_source', 'Wordpress_configuring', custom_context)
    link_tasks(graph, 'Wordpress_create', '_a4c_init_Wordpress', custom_context)
    link_tasks(graph, 'Mysql_host_pre_configure_source', 'Mysql_configuring', custom_context)
    link_tasks(graph, 'Mysql_create', '_a4c_init_Mysql', custom_context)
    link_tasks(graph, 'Wordpress_php_pre_configure_target', 'PHP_configuring', custom_context)
    link_tasks(graph, 'Wordpress_database_pre_configure_target', 'Mysql_configuring', custom_context)
    link_tasks(graph, 'Wordpress_php_post_configure_source', 'Wordpress_configure', custom_context)
    link_tasks(graph, 'Mysql_configured', 'Mysql_host_post_configure_source', custom_context)
    link_tasks(graph, 'Mysql_configured', 'Wordpress_database_post_configure_target', custom_context)
    link_tasks(graph, 'Mysql_configured', 'Mysql_configure', custom_context)
    link_tasks(graph, 'Wordpress_host_post_configure_source', 'Wordpress_configure', custom_context)
    link_tasks(graph, 'PHP_created', 'PHP_create', custom_context)
    link_tasks(graph, 'Mysql_host_post_configure_source', 'Mysql_configure', custom_context)
    link_tasks(graph, 'PHP_started', 'PHP_start', custom_context)
    link_tasks(graph, 'Apache_create', '_a4c_init_Apache', custom_context)
    link_tasks(graph, 'PHP_start', 'PHP_starting', custom_context)
    link_tasks(graph, 'PHP_starting', 'PHP_configured', custom_context)
    link_tasks(graph, 'Apache_host_add_target', 'Apache_started', custom_context)
    link_tasks(graph, 'PHP_initial', 'Server_install', custom_context)
    link_tasks(graph, 'PHP_creating', 'PHP_initial', custom_context)
    link_tasks(graph, 'Mysql_created', 'Mysql_create', custom_context)
    link_tasks(graph, 'Wordpress_host_pre_configure_target', 'Apache_configuring', custom_context)
    link_tasks(graph, 'Wordpress_php_add_source', 'Wordpress_started', custom_context)
    link_tasks(graph, 'Wordpress_php_post_configure_target', 'PHP_configure', custom_context)
    link_tasks(graph, 'Wordpress_database_pre_configure_source', 'Wordpress_configuring', custom_context)
    link_tasks(graph, '_a4c_init_PHP', 'PHP_creating', custom_context)
    link_tasks(graph, '_a4c_init_Mysql', 'Mysql_creating', custom_context)
    link_tasks(graph, '_a4c_init_Wordpress', 'Wordpress_creating', custom_context)
    link_tasks(graph, '_a4c_init_Apache', 'Apache_creating', custom_context)
def _a4c_uninstall(ctx, graph, custom_context):
    """Populate *graph* with the tasks of the generated 'uninstall' workflow.

    Generated (alien4cloud) code: registers the customized workflow nodes,
    declares the stop/delete and relationship-unlink tasks, generates the
    native nodes' delegate steps, then wires ordering with link_tasks.
    """
    # following code can be pasted in src/test/python/workflows/tasks.py for simulation
    # Register nodes whose workflow is customized. Duplicate registrations
    # appear in the generated output — presumably idempotent (TODO confirm).
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('Mysql')
    # Declare the teardown tasks (unlink / stop / delete / set-state) and
    # the native delegate steps (NetPub, Server, DataBase).
    relationship_operation_task(ctx, graph, 'Wordpress', 'PHP', 'cloudify.interfaces.relationship_lifecycle.unlink', 'TARGET', 'Wordpress_php_remove_source', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'deleted', 'Wordpress_deleted', custom_context)
    set_state_task(ctx, graph, 'Apache', 'stopped', 'Apache_stopped', custom_context)
    set_state_task(ctx, graph, 'Apache', 'stopping', 'Apache_stopping', custom_context)
    operation_task(ctx, graph, 'Wordpress', 'cloudify.interfaces.lifecycle.stop', 'Wordpress_stop', custom_context)
    operation_task(ctx, graph, 'Mysql', 'cloudify.interfaces.lifecycle.delete', 'Mysql_delete', custom_context)
    relationship_operation_task(ctx, graph, 'Apache', 'Server', 'cloudify.interfaces.relationship_lifecycle.unlink', 'SOURCE', 'Apache_host_remove_target', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'PHP', 'cloudify.interfaces.relationship_lifecycle.unlink', 'SOURCE', 'Wordpress_php_remove_target', custom_context)
    set_state_task(ctx, graph, 'PHP', 'stopping', 'PHP_stopping', custom_context)
    custom_context.register_native_delegate_wf_step('NetPub', 'NetPub_uninstall')
    set_state_task(ctx, graph, 'PHP', 'deleted', 'PHP_deleted', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'stopped', 'Wordpress_stopped', custom_context)
    operation_task(ctx, graph, 'PHP', 'cloudify.interfaces.lifecycle.delete', 'PHP_delete', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'stopping', 'Mysql_stopping', custom_context)
    relationship_operation_task(ctx, graph, 'Mysql', 'DataBase', 'cloudify.interfaces.relationship_lifecycle.unlink', 'SOURCE', 'Mysql_host_remove_target', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Apache', 'cloudify.interfaces.relationship_lifecycle.unlink', 'SOURCE', 'Wordpress_host_remove_target', custom_context)
    operation_task(ctx, graph, 'Apache', 'cloudify.interfaces.lifecycle.delete', 'Apache_delete', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'deleting', 'Wordpress_deleting', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.unlink', 'TARGET', 'Wordpress_database_remove_source', custom_context)
    operation_task(ctx, graph, 'PHP', 'cloudify.interfaces.lifecycle.stop', 'PHP_stop', custom_context)
    set_state_task(ctx, graph, 'Apache', 'deleting', 'Apache_deleting', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.unlink', 'SOURCE', 'Wordpress_database_remove_target', custom_context)
    relationship_operation_task(ctx, graph, 'PHP', 'Server', 'cloudify.interfaces.relationship_lifecycle.unlink', 'SOURCE', 'PHP_host_remove_target', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'stopping', 'Wordpress_stopping', custom_context)
    set_state_task(ctx, graph, 'PHP', 'deleting', 'PHP_deleting', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'deleting', 'Mysql_deleting', custom_context)
    set_state_task(ctx, graph, 'PHP', 'stopped', 'PHP_stopped', custom_context)
    operation_task(ctx, graph, 'Mysql', 'cloudify.interfaces.lifecycle.stop', 'Mysql_stop', custom_context)
    custom_context.register_native_delegate_wf_step('Server', 'Server_uninstall')
    relationship_operation_task(ctx, graph, 'Wordpress', 'Apache', 'cloudify.interfaces.relationship_lifecycle.unlink', 'TARGET', 'Wordpress_host_remove_source', custom_context)
    operation_task(ctx, graph, 'Apache', 'cloudify.interfaces.lifecycle.stop', 'Apache_stop', custom_context)
    custom_context.register_native_delegate_wf_step('DataBase', 'DataBase_uninstall')
    set_state_task(ctx, graph, 'Mysql', 'stopped', 'Mysql_stopped', custom_context)
    set_state_task(ctx, graph, 'Apache', 'deleted', 'Apache_deleted', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'deleted', 'Mysql_deleted', custom_context)
    operation_task(ctx, graph, 'Wordpress', 'cloudify.interfaces.lifecycle.delete', 'Wordpress_delete', custom_context)
    # Generate the delegate workflow steps of the native (infrastructure) nodes.
    generate_native_node_workflows(ctx, graph, custom_context, 'uninstall')
    # Ordering constraints. NOTE(review): each call appears to make the
    # first-named task run after the second (e.g. 'Wordpress_deleted' follows
    # 'Wordpress_delete') — confirm against link_tasks' definition.
    link_tasks(graph, 'Wordpress_deleted', 'Wordpress_delete', custom_context)
    link_tasks(graph, 'Apache_stopped', 'Apache_stop', custom_context)
    link_tasks(graph, 'Apache_stopping', 'Wordpress_deleted', custom_context)
    link_tasks(graph, 'Apache_stopping', 'Apache_host_remove_target', custom_context)
    link_tasks(graph, 'Wordpress_stop', 'Wordpress_stopping', custom_context)
    link_tasks(graph, 'Mysql_delete', 'Mysql_deleting', custom_context)
    link_tasks(graph, 'PHP_stopping', 'PHP_host_remove_target', custom_context)
    link_tasks(graph, 'PHP_stopping', 'Wordpress_deleted', custom_context)
    link_tasks(graph, 'PHP_deleted', 'PHP_delete', custom_context)
    link_tasks(graph, 'Wordpress_stopped', 'Wordpress_stop', custom_context)
    link_tasks(graph, 'PHP_delete', 'PHP_deleting', custom_context)
    link_tasks(graph, 'Mysql_stopping', 'Wordpress_deleted', custom_context)
    link_tasks(graph, 'Mysql_stopping', 'Mysql_host_remove_target', custom_context)
    link_tasks(graph, 'Apache_delete', 'Apache_deleting', custom_context)
    link_tasks(graph, 'Wordpress_deleting', 'Wordpress_stopped', custom_context)
    link_tasks(graph, 'PHP_stop', 'PHP_stopping', custom_context)
    link_tasks(graph, 'Apache_deleting', 'Apache_stopped', custom_context)
    link_tasks(graph, 'Wordpress_stopping', 'Wordpress_host_remove_source', custom_context)
    link_tasks(graph, 'Wordpress_stopping', 'Wordpress_database_remove_target', custom_context)
    link_tasks(graph, 'Wordpress_stopping', 'Wordpress_php_remove_source', custom_context)
    link_tasks(graph, 'Wordpress_stopping', 'Wordpress_host_remove_target', custom_context)
    link_tasks(graph, 'Wordpress_stopping', 'Wordpress_database_remove_source', custom_context)
    link_tasks(graph, 'Wordpress_stopping', 'Wordpress_php_remove_target', custom_context)
    link_tasks(graph, 'PHP_deleting', 'PHP_stopped', custom_context)
    link_tasks(graph, 'Mysql_deleting', 'Mysql_stopped', custom_context)
    link_tasks(graph, 'PHP_stopped', 'PHP_stop', custom_context)
    link_tasks(graph, 'Mysql_stop', 'Mysql_stopping', custom_context)
    link_tasks(graph, 'Server_uninstall', 'Apache_deleted', custom_context)
    link_tasks(graph, 'Server_uninstall', 'PHP_deleted', custom_context)
    link_tasks(graph, 'Apache_stop', 'Apache_stopping', custom_context)
    link_tasks(graph, 'DataBase_uninstall', 'Mysql_deleted', custom_context)
    link_tasks(graph, 'Mysql_stopped', 'Mysql_stop', custom_context)
    link_tasks(graph, 'Apache_deleted', 'Apache_delete', custom_context)
    link_tasks(graph, 'Mysql_deleted', 'Mysql_delete', custom_context)
    link_tasks(graph, 'Wordpress_delete', 'Wordpress_deleting', custom_context)
def _a4c_start(ctx, graph, custom_context):
    """Populate *graph* with the tasks of the generated 'start' workflow.

    Generated (alien4cloud) code: registers the customized workflow nodes,
    declares the start and relationship-establish tasks, generates the
    native nodes' delegate steps, then wires ordering with link_tasks.
    """
    # following code can be pasted in src/test/python/workflows/tasks.py for simulation
    # Register nodes whose workflow is customized. Duplicate registrations
    # appear in the generated output — presumably idempotent (TODO confirm).
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('Wordpress')
    custom_context.add_customized_wf_node('Apache')
    custom_context.add_customized_wf_node('PHP')
    custom_context.add_customized_wf_node('Mysql')
    custom_context.add_customized_wf_node('Wordpress')
    # Declare the start tasks (set-state / operation / relationship
    # establish) and the native delegate steps (DataBase, Server, NetPub).
    set_state_task(ctx, graph, 'Apache', 'started', 'Apache_started', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.establish', 'TARGET', 'Wordpress_database_add_source', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'started', 'Mysql_started', custom_context)
    set_state_task(ctx, graph, 'PHP', 'started', 'PHP_started', custom_context)
    custom_context.register_native_delegate_wf_step('DataBase', 'DataBase_start')
    relationship_operation_task(ctx, graph, 'PHP', 'Server', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'PHP_host_add_target', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'started', 'Wordpress_started', custom_context)
    operation_task(ctx, graph, 'Wordpress', 'cloudify.interfaces.lifecycle.start', 'Wordpress_start', custom_context)
    set_state_task(ctx, graph, 'Apache', 'starting', 'Apache_starting', custom_context)
    operation_task(ctx, graph, 'PHP', 'cloudify.interfaces.lifecycle.start', 'PHP_start', custom_context)
    set_state_task(ctx, graph, 'PHP', 'starting', 'PHP_starting', custom_context)
    relationship_operation_task(ctx, graph, 'Apache', 'Server', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'Apache_host_add_target', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Mysql', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'Wordpress_database_add_target', custom_context)
    operation_task(ctx, graph, 'Mysql', 'cloudify.interfaces.lifecycle.start', 'Mysql_start', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'Apache', 'cloudify.interfaces.relationship_lifecycle.establish', 'TARGET', 'Wordpress_host_add_source', custom_context)
    custom_context.register_native_delegate_wf_step('Server', 'Server_start')
    relationship_operation_task(ctx, graph, 'Wordpress', 'Apache', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'Wordpress_host_add_target', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'PHP', 'cloudify.interfaces.relationship_lifecycle.establish', 'TARGET', 'Wordpress_php_add_source', custom_context)
    relationship_operation_task(ctx, graph, 'Mysql', 'DataBase', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'Mysql_host_add_target', custom_context)
    custom_context.register_native_delegate_wf_step('NetPub', 'NetPub_start')
    operation_task(ctx, graph, 'Apache', 'cloudify.interfaces.lifecycle.start', 'Apache_start', custom_context)
    set_state_task(ctx, graph, 'Mysql', 'starting', 'Mysql_starting', custom_context)
    relationship_operation_task(ctx, graph, 'Wordpress', 'PHP', 'cloudify.interfaces.relationship_lifecycle.establish', 'SOURCE', 'Wordpress_php_add_target', custom_context)
    set_state_task(ctx, graph, 'Wordpress', 'starting', 'Wordpress_starting', custom_context)
    # Generate the delegate workflow steps of the native (infrastructure) nodes.
    generate_native_node_workflows(ctx, graph, custom_context, 'start')
    # Ordering constraints. NOTE(review): each call appears to make the
    # first-named task run after the second (e.g. 'Apache_started' follows
    # 'Apache_start') — confirm against link_tasks' definition.
    link_tasks(graph, 'Apache_started', 'Apache_start', custom_context)
    link_tasks(graph, 'Wordpress_database_add_source', 'Wordpress_started', custom_context)
    link_tasks(graph, 'Mysql_started', 'Mysql_start', custom_context)
    link_tasks(graph, 'PHP_started', 'PHP_start', custom_context)
    link_tasks(graph, 'PHP_host_add_target', 'PHP_started', custom_context)
    link_tasks(graph, 'Wordpress_started', 'Wordpress_start', custom_context)
    link_tasks(graph, 'Wordpress_start', 'Wordpress_starting', custom_context)
    link_tasks(graph, 'Apache_starting', 'Server_start', custom_context)
    link_tasks(graph, 'PHP_start', 'PHP_starting', custom_context)
    link_tasks(graph, 'PHP_starting', 'Server_start', custom_context)
    link_tasks(graph, 'Apache_host_add_target', 'Apache_started', custom_context)
    link_tasks(graph, 'Wordpress_database_add_target', 'Wordpress_started', custom_context)
    link_tasks(graph, 'Mysql_start', 'Mysql_starting', custom_context)
    link_tasks(graph, 'Wordpress_host_add_source', 'Wordpress_started', custom_context)
    link_tasks(graph, 'Wordpress_host_add_target', 'Wordpress_started', custom_context)
    link_tasks(graph, 'Wordpress_php_add_source', 'Wordpress_started', custom_context)
    link_tasks(graph, 'Mysql_host_add_target', 'Mysql_started', custom_context)
    link_tasks(graph, 'Apache_start', 'Apache_starting', custom_context)
    link_tasks(graph, 'Mysql_starting', 'DataBase_start', custom_context)
    link_tasks(graph, 'Wordpress_php_add_target', 'Wordpress_started', custom_context)
    link_tasks(graph, 'Wordpress_starting', 'Apache_started', custom_context)
    link_tasks(graph, 'Wordpress_starting', 'Mysql_started', custom_context)
    link_tasks(graph, 'Wordpress_starting', 'PHP_started', custom_context)
def _a4c_stop(ctx, graph, custom_context):
    # following code can be pasted in src/test/python/workflows/tasks.py for simulation
    """Populate ``graph`` with the tasks of the generated 'stop' workflow.

    Declares per-node state/operation tasks and relationship unlink tasks,
    then wires their ordering with ``link_tasks``.
    """
    # Local shorthands so each generated workflow step below fits on one line.
    def _state(node, state_name, step_id):
        set_state_task(ctx, graph, node, state_name, step_id, custom_context)

    def _op(node, operation, step_id):
        operation_task(ctx, graph, node, operation, step_id, custom_context)

    def _unlink(source, target, side, step_id):
        relationship_operation_task(
            ctx, graph, source, target,
            'cloudify.interfaces.relationship_lifecycle.unlink',
            side, step_id, custom_context)

    def _link(a, b):
        # Argument order exactly as in the generated original.
        link_tasks(graph, a, b, custom_context)

    # Nodes with customized workflow steps (duplicate registrations kept
    # from the generated original).
    for node in ('Apache', 'Wordpress', 'Apache', 'PHP',
                 'PHP', 'Wordpress', 'Mysql', 'Mysql'):
        custom_context.add_customized_wf_node(node)

    _unlink('Wordpress', 'Mysql', 'SOURCE', 'Wordpress_database_remove_target')
    _unlink('Wordpress', 'PHP', 'TARGET', 'Wordpress_php_remove_source')
    _unlink('PHP', 'Server', 'SOURCE', 'PHP_host_remove_target')
    _state('Apache', 'stopped', 'Apache_stopped')
    _state('Wordpress', 'stopping', 'Wordpress_stopping')
    _state('Apache', 'stopping', 'Apache_stopping')
    _op('Wordpress', 'cloudify.interfaces.lifecycle.stop', 'Wordpress_stop')
    _state('PHP', 'stopped', 'PHP_stopped')
    _unlink('Apache', 'Server', 'SOURCE', 'Apache_host_remove_target')
    _unlink('Wordpress', 'PHP', 'SOURCE', 'Wordpress_php_remove_target')
    _state('PHP', 'stopping', 'PHP_stopping')
    custom_context.register_native_delegate_wf_step('NetPub', 'NetPub_stop')
    custom_context.register_native_delegate_wf_step('Server', 'Server_stop')
    _op('Mysql', 'cloudify.interfaces.lifecycle.stop', 'Mysql_stop')
    _unlink('Wordpress', 'Apache', 'TARGET', 'Wordpress_host_remove_source')
    _state('Wordpress', 'stopped', 'Wordpress_stopped')
    _op('Apache', 'cloudify.interfaces.lifecycle.stop', 'Apache_stop')
    _state('Mysql', 'stopped', 'Mysql_stopped')
    _state('Mysql', 'stopping', 'Mysql_stopping')
    _unlink('Mysql', 'DataBase', 'SOURCE', 'Mysql_host_remove_target')
    _unlink('Wordpress', 'Apache', 'SOURCE', 'Wordpress_host_remove_target')
    custom_context.register_native_delegate_wf_step('DataBase', 'DataBase_stop')
    _unlink('Wordpress', 'Mysql', 'TARGET', 'Wordpress_database_remove_source')
    _op('PHP', 'cloudify.interfaces.lifecycle.stop', 'PHP_stop')
    generate_native_node_workflows(ctx, graph, custom_context, 'stop')
    # Ordering constraints between the steps declared above.
    _link('Apache_stopped', 'Apache_stop')
    _link('Wordpress_stopping', 'Wordpress_host_remove_source')
    _link('Wordpress_stopping', 'Wordpress_database_remove_target')
    _link('Wordpress_stopping', 'Wordpress_php_remove_source')
    _link('Wordpress_stopping', 'Wordpress_host_remove_target')
    _link('Wordpress_stopping', 'Wordpress_database_remove_source')
    _link('Wordpress_stopping', 'Wordpress_php_remove_target')
    _link('Apache_stopping', 'Wordpress_stopped')
    _link('Apache_stopping', 'Apache_host_remove_target')
    _link('Wordpress_stop', 'Wordpress_stopping')
    _link('PHP_stopped', 'PHP_stop')
    _link('PHP_stopping', 'Wordpress_stopped')
    _link('PHP_stopping', 'PHP_host_remove_target')
    _link('Server_stop', 'Apache_stopped')
    _link('Server_stop', 'PHP_stopped')
    _link('Mysql_stop', 'Mysql_stopping')
    _link('Wordpress_stopped', 'Wordpress_stop')
    _link('Apache_stop', 'Apache_stopping')
    _link('Mysql_stopped', 'Mysql_stop')
    _link('Mysql_stopping', 'Wordpress_stopped')
    _link('Mysql_stopping', 'Mysql_host_remove_target')
    _link('DataBase_stop', 'Mysql_stopped')
    _link('PHP_stop', 'PHP_stopping')
def _get_scaling_group_name_from_node_id(ctx, node_id):
scaling_groups=ctx.deployment.scaling_groups
for group_name, scaling_group in ctx.deployment.scaling_groups.iteritems():
for member in scaling_group['members']:
if member == node_id:
ctx.logger.info("Node {} found in scaling group {}".format(node_id, group_name))
return group_name
return None
@workflow
def a4c_scale(ctx, node_id, delta, scale_compute, **kwargs):
    """Scale node ``node_id`` (or its scaling group) by ``delta`` instances.

    If the node belongs to a scaling group, the whole group is scaled;
    otherwise the node itself must be a host (or kubernetes) node and is
    scaled directly. A deployment modification is started, the install
    (or uninstall, for negative delta) sub-workflow is executed, and the
    modification is finished on success or rolled back on failure.

    :param ctx: cloudify workflow context
    :param node_id: id of the node to scale
    :param delta: signed number of instances to add (may arrive as a string)
    :param scale_compute: unused here; presumably kept for workflow-API
        signature compatibility -- TODO confirm against the workflow plugin
    """
    delta = int(delta)
    scalable_entity_name = _get_scaling_group_name_from_node_id(ctx, node_id)
    scaling_group = ctx.deployment.scaling_groups.get(scalable_entity_name)
    if scalable_entity_name:
        # Node is a member of a scaling group: scale the group as a whole.
        curr_num_instances = scaling_group['properties']['current_instances']
        planned_num_instances = curr_num_instances + delta
        scale_id = scalable_entity_name
    else:
        # No scaling group: scale the node itself (hosts only).
        scaled_node = ctx.get_node(node_id)
        if not scaled_node:
            raise ValueError("Node {0} doesn't exist".format(scalable_entity_name))
        if not is_host_node(scaled_node) and not is_kubernetes_node(scaled_node):
            raise ValueError("Node {0} is not a host. This workflow can only scale hosts".format(scalable_entity_name))
        if delta == 0:
            ctx.logger.info('delta parameter is 0, so no scaling will take place.')
            return
        curr_num_instances = scaled_node.number_of_instances
        planned_num_instances = curr_num_instances + delta
        scale_id = scaled_node.id
        scalable_entity_name = scale_id
    if planned_num_instances < 1:
        # NOTE(review): the two adjacent literals concatenate without a space
        # ("number of'instances") -- message cosmetics only.
        raise ValueError('Provided delta: {0} is illegal. current number of'
                         'instances of node/group {1} is {2}'
                         .format(delta, scalable_entity_name, curr_num_instances))
    modification = ctx.deployment.start_modification({
        scale_id: {
            'instances': planned_num_instances
        }
    })
    ctx.logger.info('Deployment modification started. [modification_id={0} : {1}]'.format(modification.id, dir(modification)))
    try:
        if delta > 0:
            # Scale out: install the instances added by the modification.
            ctx.logger.info('Scaling host/group {0} adding {1} instances'.format(scalable_entity_name, delta))
            added_and_related = _get_all_nodes(modification.added)
            added = _get_all_modified_node_instances(added_and_related, 'added')
            graph = ctx.graph_mode()
            ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
                                             message=build_pre_event(WfStartEvent('scale', 'install')))
            custom_context = CustomContext(ctx, added, added_and_related)
            install_host(ctx, graph, custom_context, node_id)
            try:
                graph.execute()
            except:  # NOTE(review): bare except also catches SystemExit/KeyboardInterrupt
                # Install failed: clear the task graph, try a best-effort
                # uninstall of the instances just added, then re-raise the
                # original failure (the bare `raise` below is outside the
                # inner except, so it re-raises the install error).
                ctx.logger.error('Scale failed. Uninstalling node/group {0}'.format(scalable_entity_name))
                graph = ctx.internal.task_graph
                for task in graph.tasks_iter():
                    graph.remove_task(task)
                try:
                    custom_context = CustomContext(ctx, added, added_and_related)
                    uninstall_host(ctx, graph, custom_context, scalable_entity_name)
                    graph.execute()
                except:
                    ctx.logger.error('Node {0} uninstallation following scale failure has failed'.format(scalable_entity_name))
                raise
        else:
            # Scale in: uninstall the instances removed by the modification.
            ctx.logger.info('Unscaling host/group {0} removing {1} instances'.format(scalable_entity_name, delta))
            removed_and_related = _get_all_nodes(modification.removed)
            removed = _get_all_modified_node_instances(removed_and_related, 'removed')
            graph = ctx.graph_mode()
            ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
                                             message=build_pre_event(WfStartEvent('scale', 'uninstall')))
            custom_context = CustomContext(ctx, removed, removed_and_related)
            uninstall_host(ctx, graph, custom_context, node_id)
            try:
                graph.execute()
            except:
                ctx.logger.error('Unscale failed.')
                raise
    except:
        # Any failure above: roll the deployment modification back, then
        # re-raise the original exception.
        ctx.logger.warn('Rolling back deployment modification. [modification_id={0}]'.format(modification.id))
        try:
            modification.rollback()
        except:
            ctx.logger.warn('Deployment modification rollback failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state.'
                            '[modification_id={0}]'.format(modification.id))
            raise
        raise
    else:
        # Success path: commit the deployment modification.
        try:
            modification.finish()
        except:
            ctx.logger.warn('Deployment modification finish failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state.'
                            '[modification_id={0}]'.format(modification.id))
            raise
@workflow
def a4c_heal(
        ctx,
        node_instance_id,
        diagnose_value='Not provided',
        **kwargs):
    """Reinstalls the whole subgraph of the system topology

    The subgraph consists of all the nodes that are hosted in the
    failing node's compute and the compute itself.
    Additionally it unlinks and establishes appropriate relationships

    :param ctx: cloudify context
    :param node_instance_id: failing node's instance id
    :param diagnose_value: diagnosed reason of failure
    """
    ctx.logger.info("Starting 'heal' workflow on {0}, Diagnosis: {1}"
                    .format(node_instance_id, diagnose_value))
    failing_node = ctx.get_node_instance(node_instance_id)
    # Heal the whole compute hosting the failing instance, not just the
    # failing instance itself.
    host_instance_id = failing_node._node_instance.host_id
    failing_node_host = ctx.get_node_instance(host_instance_id)
    node_id = failing_node_host.node_id
    subgraph_node_instances = failing_node_host.get_contained_subgraph()
    added_and_related = _get_all_nodes(ctx)
    try:
        # Phase 1: best-effort uninstall of the host's contained subgraph.
        graph = ctx.graph_mode()
        ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
                                         message=build_pre_event(WfStartEvent('heal', 'uninstall')))
        custom_context = CustomContext(ctx, subgraph_node_instances, added_and_related)
        uninstall_host(ctx, graph, custom_context, node_id)
        graph.execute()
    except:  # NOTE(review): bare except -- uninstall failures are deliberately swallowed
        ctx.logger.error('Uninstall while healing failed.')
    # Phase 2: always reinstall the subgraph, reusing the task graph after
    # clearing any leftover tasks from the uninstall attempt.
    graph = ctx.internal.task_graph
    for task in graph.tasks_iter():
        graph.remove_task(task)
    ctx.internal.send_workflow_event(event_type='a4c_workflow_started',
                                     message=build_pre_event(WfStartEvent('heal', 'install')))
    custom_context = CustomContext(ctx, subgraph_node_instances, added_and_related)
    install_host(ctx, graph, custom_context, node_id)
    graph.execute()
#following code can be pasted in src/test/python/workflows/context.py for simulation
#def _build_nodes(ctx):
#types = []
#types.append('alien.cloudify.aws.nodes.Compute')
#types.append('tosca.nodes.Compute')
#types.append('tosca.nodes.Root')
#node_Server = _build_node(ctx, 'Server', types, 1)
#types = []
#types.append('org.alien4cloud.nodes.Wordpress')
#types.append('tosca.nodes.WebApplication')
#types.append('tosca.nodes.Root')
#node_Wordpress = _build_node(ctx, 'Wordpress', types, 1)
#types = []
#types.append('org.alien4cloud.nodes.Mysql')
#types.append('org.alien4cloud.nodes.AbstractMysql')
#types.append('tosca.nodes.Database')
#types.append('tosca.nodes.Root')
#node_Mysql = _build_node(ctx, 'Mysql', types, 1)
#types = []
#types.append('org.alien4cloud.nodes.PHP')
#types.append('tosca.nodes.SoftwareComponent')
#types.append('tosca.nodes.Root')
#node_PHP = _build_node(ctx, 'PHP', types, 1)
#types = []
#types.append('org.alien4cloud.nodes.Apache')
#types.append('tosca.nodes.WebServer')
#types.append('tosca.nodes.SoftwareComponent')
#types.append('tosca.nodes.Root')
#node_Apache = _build_node(ctx, 'Apache', types, 1)
#types = []
#types.append('alien.nodes.aws.PublicNetwork')
#types.append('alien.nodes.PublicNetwork')
#types.append('tosca.nodes.Network')
#types.append('tosca.nodes.Root')
#node_NetPub = _build_node(ctx, 'NetPub', types, 1)
#types = []
#types.append('alien.cloudify.aws.nodes.Compute')
#types.append('tosca.nodes.Compute')
#types.append('tosca.nodes.Root')
#node_DataBase = _build_node(ctx, 'DataBase', types, 1)
#_add_relationship(node_Server, node_NetPub)
#_add_relationship(node_Wordpress, node_Apache)
#_add_relationship(node_Wordpress, node_Mysql)
#_add_relationship(node_Wordpress, node_PHP)
#_add_relationship(node_Mysql, node_DataBase)
#_add_relationship(node_PHP, node_Server)
#_add_relationship(node_Apache, node_Server)
| 78.574018
| 195
| 0.774249
| 9,362
| 78,024
| 6.039094
| 0.024354
| 0.165093
| 0.073296
| 0.112066
| 0.941827
| 0.934716
| 0.921027
| 0.917471
| 0.904701
| 0.897485
| 0
| 0.001267
| 0.109761
| 78,024
| 992
| 196
| 78.653226
| 0.812698
| 0.033003
| 0
| 0.843645
| 0
| 0
| 0.386051
| 0.18931
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021372
| false
| 0
| 0.017998
| 0
| 0.047244
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
259c82722265c0170eb3122613d161ae83a14e8b
| 2,625
|
py
|
Python
|
tests/features/steps/shell-wrapper.py
|
82ndAirborneDiv/geneflow2
|
521544adbae1221d2d80496016548670ce5391c1
|
[
"Apache-2.0"
] | 7
|
2019-04-11T03:50:51.000Z
|
2020-03-27T15:59:04.000Z
|
tests/features/steps/shell-wrapper.py
|
82ndAirborneDiv/geneflow2
|
521544adbae1221d2d80496016548670ce5391c1
|
[
"Apache-2.0"
] | 1
|
2019-05-06T14:18:42.000Z
|
2019-05-08T22:06:12.000Z
|
tests/features/steps/shell-wrapper.py
|
82ndAirborneDiv/geneflow2
|
521544adbae1221d2d80496016548670ce5391c1
|
[
"Apache-2.0"
] | 6
|
2019-04-10T20:25:27.000Z
|
2021-12-16T15:59:59.000Z
|
import os
from pprint import pprint
import sys
from geneflow.shell_wrapper import ShellWrapper
@given('I create a "{shell}" Shell instance')
def step_impl(context, shell):
    """Register a fresh ShellWrapper in the scenario context under ``shell``."""
    entry = {}
    context.shell[shell] = entry
    entry['shell'] = ShellWrapper()
@when('I run the invoke method for the "{shell}" Shell instance with a valid argument')
def step_impl(context, shell):
    """Invoke a valid echo command and record its output in the context."""
    entry = context.shell[shell]
    entry['invoke_correct_result'] = entry['shell'].invoke("echo 'Hello World!'")
@then('I see a valid response for invoke method of the "{shell}" Shell instance')
def step_impl(context, shell):
    """Check the recorded invoke output is the echoed bytes."""
    observed = context.shell[shell]['invoke_correct_result']
    assert observed == b'Hello World!\n'
@when('I run the invoke method for the "{shell}" Shell instance with an invalid argument')
def step_impl(context, shell):
    """Invoke a nonexistent command and record the (failed) result."""
    entry = context.shell[shell]
    entry['invoke_incorrect_result'] = entry['shell'].invoke("echo-invalid 'Hello World!'")
@then('I see a False return value for the invoke method of the "{shell}" Shell instance')
def step_impl(context, shell):
    """Check the failed invoke recorded a False result."""
    result = context.shell[shell]['invoke_incorrect_result']
    assert result == False
@when('I run the spawn method for the "{shell}" Shell instance with a valid argument')
def step_impl(context, shell):
    """Spawn a valid sleep command and record the process handle."""
    entry = context.shell[shell]
    entry['spawn_proc'] = entry['shell'].spawn("sleep 30")
@then('I see a valid response for the spawn method of the "{shell}" Shell instance')
def step_impl(context, shell):
    """Check the spawn produced a process that exits cleanly."""
    proc = context.shell[shell]['spawn_proc']
    assert proc
    proc.wait()
    assert proc.returncode == 0
@when('I run the spawn method for the "{shell}" Shell instance with an invalid argument')
def step_impl(context, shell):
    """Spawn a nonexistent command and record whatever comes back."""
    entry = context.shell[shell]
    entry['spawn_proc'] = entry['shell'].spawn("sleep-invalid 30")
@then('I see a negative result for the spawn method of the "{shell}" Shell instance')
def step_impl(context, shell):
    """Assert the spawn failed: either no process or a non-zero exit code."""
    proc = context.shell[shell]['spawn_proc']
    if not proc:
        # spawn itself failed and returned a falsy value
        assert proc == False
    else:
        # a process was created; it must have exited with an error
        proc.wait()
        assert proc.returncode != 0
@when('I call the is_running method for the "{shell}" Shell instance')
def step_impl(context, shell):
    """Query is_running on the recorded process and store the status."""
    entry = context.shell[shell]
    entry['process_status'] = entry['shell'].is_running(entry['spawn_proc'])
@then('I see a value of True returned for is_running method of the "{shell}" Shell instance')
def step_impl(context, shell):
    """Check the recorded running status is True."""
    status = context.shell[shell]['process_status']
    assert status == True
| 36.458333
| 121
| 0.717333
| 382
| 2,625
| 4.837696
| 0.164921
| 0.21645
| 0.21158
| 0.107143
| 0.821429
| 0.772186
| 0.710498
| 0.643939
| 0.639069
| 0.637446
| 0
| 0.002662
| 0.141333
| 2,625
| 71
| 122
| 36.971831
| 0.817214
| 0.012571
| 0
| 0.295455
| 0
| 0
| 0.436244
| 0.034003
| 0
| 0
| 0
| 0
| 0.159091
| 1
| 0.25
| false
| 0
| 0.090909
| 0
| 0.340909
| 0.022727
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
25af26b3d99d6dec9782b4da310bb10a8fca0dc4
| 29,610
|
py
|
Python
|
modelling/information/single_period_information_pb2.py
|
nareshram256/EnergyManagementSystem
|
2a48ba3b9bf7ff3003c197ee43ea9efbfbe42baa
|
[
"MIT"
] | 9
|
2020-04-24T14:34:16.000Z
|
2022-01-25T07:16:03.000Z
|
modelling/information/single_period_information_pb2.py
|
casemsee/EnergyManagementSystem
|
2a48ba3b9bf7ff3003c197ee43ea9efbfbe42baa
|
[
"MIT"
] | null | null | null |
modelling/information/single_period_information_pb2.py
|
casemsee/EnergyManagementSystem
|
2a48ba3b9bf7ff3003c197ee43ea9efbfbe42baa
|
[
"MIT"
] | 7
|
2019-09-19T13:26:02.000Z
|
2021-11-27T09:53:54.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: single_period_information.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='single_period_information.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x1fsingle_period_information.proto\"\xf9\t\n\tmicrogrid\x12\x0c\n\x04\x41REA\x18\x01 \x01(\x05\x12\x12\n\nTIME_STAMP\x18\x02 \x01(\x05\x12\x1d\n\x02\x64g\x18\x03 \x03(\x0b\x32\x11.microgrid.DgType\x12\x1f\n\x03\x65ss\x18\x04 \x03(\x0b\x32\x12.microgrid.EssType\x12\x1d\n\x02pv\x18\x05 \x03(\x0b\x32\x11.microgrid.PvType\x12\x1d\n\x02wp\x18\x06 \x03(\x0b\x32\x11.microgrid.WpType\x12(\n\x07load_ac\x18\x07 \x03(\x0b\x32\x17.microgrid.Load_AC_Type\x12(\n\x07load_dc\x18\x08 \x03(\x0b\x32\x17.microgrid.Load_DC_Type\x12&\n\x03\x62ic\x18\t \x03(\x0b\x32\x19.microgrid.Convertor_Type\x12\x0b\n\x03PMG\x18\n \x01(\x02\x12\x0c\n\x04V_DC\x18\x0b \x01(\x02\x12\x14\n\x0c\x43OMMAND_TYPE\x18\x0c \x01(\x05\x12\x1a\n\x12\x43OMMAND_TIME_STAMP\x18\r \x01(\x05\x12\x0c\n\x04\x43OST\x18\x0e \x01(\x02\x1a\xbf\x01\n\x06\x44gType\x12\n\n\x02ID\x18\x01 \x01(\x05\x12\x0e\n\x06STATUS\x18\x02 \x01(\x05\x12\n\n\x02PG\x18\x03 \x01(\x05\x12\n\n\x02QG\x18\x04 \x01(\x05\x12\n\n\x02VG\x18\x05 \x01(\x02\x12\x0b\n\x03\x41PF\x18\x06 \x01(\x05\x12\x18\n\x10\x43OMMAND_START_UP\x18\x07 \x01(\x05\x12\x12\n\nCOMMAND_VG\x18\x08 \x01(\x02\x12\x12\n\nCOMMAND_PG\x18\t \x01(\x05\x12\x12\n\nCOMMAND_QG\x18\n \x01(\x05\x12\x12\n\nCOMMAND_RG\x18\x0b \x01(\x05\x1ar\n\x07\x45ssType\x12\n\n\x02ID\x18\x01 \x01(\x05\x12\x0e\n\x06STATUS\x18\x02 \x01(\x05\x12\x0b\n\x03SOC\x18\x03 \x01(\x02\x12\n\n\x02PG\x18\x04 \x01(\x05\x12\n\n\x02RG\x18\x05 \x01(\x05\x12\x12\n\nCOMMAND_PG\x18\x06 \x01(\x05\x12\x12\n\nCOMMAND_RG\x18\x07 \x01(\x05\x1aU\n\x06PvType\x12\t\n\x01N\x18\x01 \x01(\x05\x12\n\n\x02PG\x18\x02 \x01(\x05\x12\n\n\x02QG\x18\x03 \x01(\x05\x12\x14\n\x0c\x43OMMAND_CURT\x18\x04 \x01(\x05\x12\x12\n\nCOMMAND_PG\x18\x05 \x01(\x05\x1aU\n\x06WpType\x12\t\n\x01N\x18\x01 \x01(\x05\x12\n\n\x02PG\x18\x02 \x01(\x02\x12\n\n\x02QG\x18\x03 \x01(\x05\x12\x14\n\x0c\x43OMMAND_CURT\x18\x04 \x01(\x05\x12\x12\n\nCOMMAND_PG\x18\x05 \x01(\x05\x1a\x83\x01\n\x0cLoad_AC_Type\x12\n\n\x02ID\x18\x01 
\x01(\x05\x12\x0e\n\x06STATUS\x18\x02 \x01(\x05\x12\n\n\x02PD\x18\x03 \x01(\x05\x12\n\n\x02QD\x18\x04 \x01(\x05\x12\n\n\x02PF\x18\x05 \x01(\x02\x12\x0b\n\x03\x41PF\x18\x06 \x01(\x02\x12\x12\n\nCOMMAND_PD\x18\x07 \x01(\x02\x12\x12\n\nCOMMAND_RD\x18\x08 \x01(\x02\x1ak\n\x0cLoad_DC_Type\x12\n\n\x02ID\x18\x01 \x01(\x05\x12\x0e\n\x06STATUS\x18\x02 \x01(\x05\x12\n\n\x02PD\x18\x03 \x01(\x05\x12\x0b\n\x03\x41PF\x18\x04 \x01(\x02\x12\x12\n\nCOMMAND_PD\x18\x05 \x01(\x02\x12\x12\n\nCOMMAND_RD\x18\x06 \x01(\x02\x1a\x9d\x01\n\x0e\x43onvertor_Type\x12\n\n\x02ID\x18\x01 \x01(\x05\x12\x0e\n\x06STATUS\x18\x02 \x01(\x05\x12\x0f\n\x07P_AC2DC\x18\x03 \x01(\x05\x12\x0f\n\x07P_DC2AC\x18\x04 \x01(\x05\x12\x0c\n\x04Q_AC\x18\x05 \x01(\x05\x12\x15\n\rCOMMAND_AC2DC\x18\x06 \x01(\x02\x12\x15\n\rCOMMAND_DC2AC\x18\x07 \x01(\x02\x12\x11\n\tCOMMAND_Q\x18\x08 \x01(\x05\x62\x06proto3')
)
_MICROGRID_DGTYPE = _descriptor.Descriptor(
name='DgType',
full_name='microgrid.DgType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ID', full_name='microgrid.DgType.ID', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='STATUS', full_name='microgrid.DgType.STATUS', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='PG', full_name='microgrid.DgType.PG', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='QG', full_name='microgrid.DgType.QG', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='VG', full_name='microgrid.DgType.VG', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='APF', full_name='microgrid.DgType.APF', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='COMMAND_START_UP', full_name='microgrid.DgType.COMMAND_START_UP', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='COMMAND_VG', full_name='microgrid.DgType.COMMAND_VG', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='COMMAND_PG', full_name='microgrid.DgType.COMMAND_PG', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='COMMAND_QG', full_name='microgrid.DgType.COMMAND_QG', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='COMMAND_RG', full_name='microgrid.DgType.COMMAND_RG', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=425,
serialized_end=616,
)
_MICROGRID_ESSTYPE = _descriptor.Descriptor(
name='EssType',
full_name='microgrid.EssType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ID', full_name='microgrid.EssType.ID', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='STATUS', full_name='microgrid.EssType.STATUS', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='SOC', full_name='microgrid.EssType.SOC', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='PG', full_name='microgrid.EssType.PG', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='RG', full_name='microgrid.EssType.RG', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='COMMAND_PG', full_name='microgrid.EssType.COMMAND_PG', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='COMMAND_RG', full_name='microgrid.EssType.COMMAND_RG', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=618,
serialized_end=732,
)
_MICROGRID_PVTYPE = _descriptor.Descriptor(
name='PvType',
full_name='microgrid.PvType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='N', full_name='microgrid.PvType.N', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='PG', full_name='microgrid.PvType.PG', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='QG', full_name='microgrid.PvType.QG', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='COMMAND_CURT', full_name='microgrid.PvType.COMMAND_CURT', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='COMMAND_PG', full_name='microgrid.PvType.COMMAND_PG', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=734,
serialized_end=819,
)
_MICROGRID_WPTYPE = _descriptor.Descriptor(
name='WpType',
full_name='microgrid.WpType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='N', full_name='microgrid.WpType.N', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='PG', full_name='microgrid.WpType.PG', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='QG', full_name='microgrid.WpType.QG', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='COMMAND_CURT', full_name='microgrid.WpType.COMMAND_CURT', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='COMMAND_PG', full_name='microgrid.WpType.COMMAND_PG', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=821,
serialized_end=906,
)
# Generated protobuf Descriptor for the nested message microgrid.Load_AC_Type
# (one AC load record: id/status, active & reactive demand, power factors,
# and dispatch commands).
# NOTE(review): protoc-generated -- the numeric values below (field numbers,
# type codes, serialized_start/serialized_end byte offsets) must stay in sync
# with the serialized .proto blob held by DESCRIPTOR; regenerate with protoc
# rather than hand-editing.
# Proto scalar type codes used here: type=5 -> int32 (cpp_type=1),
# type=2 -> float (cpp_type=6); label=1 -> singular field.
_MICROGRID_LOAD_AC_TYPE = _descriptor.Descriptor(
  name='Load_AC_Type',
  full_name='microgrid.Load_AC_Type',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,  # linked to _MICROGRID after both descriptors exist
  fields=[
    _descriptor.FieldDescriptor(
      name='ID', full_name='microgrid.Load_AC_Type.ID', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='STATUS', full_name='microgrid.Load_AC_Type.STATUS', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='PD', full_name='microgrid.Load_AC_Type.PD', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='QD', full_name='microgrid.Load_AC_Type.QD', index=3,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='PF', full_name='microgrid.Load_AC_Type.PF', index=4,
      number=5, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='APF', full_name='microgrid.Load_AC_Type.APF', index=5,
      number=6, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='COMMAND_PD', full_name='microgrid.Load_AC_Type.COMMAND_PD', index=6,
      number=7, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='COMMAND_RD', full_name='microgrid.Load_AC_Type.COMMAND_RD', index=7,
      number=8, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=909,
  serialized_end=1040,
)
# Generated protobuf Descriptor for microgrid.Load_DC_Type (one DC load
# record; like Load_AC_Type but without reactive-power fields QD/PF).
# NOTE(review): protoc-generated -- keep byte offsets and field numbers in
# sync with the serialized blob in DESCRIPTOR; regenerate, don't hand-edit.
_MICROGRID_LOAD_DC_TYPE = _descriptor.Descriptor(
  name='Load_DC_Type',
  full_name='microgrid.Load_DC_Type',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,  # linked to _MICROGRID below
  fields=[
    _descriptor.FieldDescriptor(
      name='ID', full_name='microgrid.Load_DC_Type.ID', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='STATUS', full_name='microgrid.Load_DC_Type.STATUS', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='PD', full_name='microgrid.Load_DC_Type.PD', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='APF', full_name='microgrid.Load_DC_Type.APF', index=3,
      number=4, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='COMMAND_PD', full_name='microgrid.Load_DC_Type.COMMAND_PD', index=4,
      number=5, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='COMMAND_RD', full_name='microgrid.Load_DC_Type.COMMAND_RD', index=5,
      number=6, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1042,
  serialized_end=1149,
)
# Generated protobuf Descriptor for microgrid.Convertor_Type (the AC/DC
# bidirectional converter: measured AC<->DC power flows plus command
# setpoints for both directions and reactive power).
# NOTE(review): protoc-generated -- keep byte offsets and field numbers in
# sync with the serialized blob in DESCRIPTOR; regenerate, don't hand-edit.
_MICROGRID_CONVERTOR_TYPE = _descriptor.Descriptor(
  name='Convertor_Type',
  full_name='microgrid.Convertor_Type',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,  # linked to _MICROGRID below
  fields=[
    _descriptor.FieldDescriptor(
      name='ID', full_name='microgrid.Convertor_Type.ID', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='STATUS', full_name='microgrid.Convertor_Type.STATUS', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='P_AC2DC', full_name='microgrid.Convertor_Type.P_AC2DC', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='P_DC2AC', full_name='microgrid.Convertor_Type.P_DC2AC', index=3,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='Q_AC', full_name='microgrid.Convertor_Type.Q_AC', index=4,
      number=5, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='COMMAND_AC2DC', full_name='microgrid.Convertor_Type.COMMAND_AC2DC', index=5,
      number=6, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='COMMAND_DC2AC', full_name='microgrid.Convertor_Type.COMMAND_DC2AC', index=6,
      number=7, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='COMMAND_Q', full_name='microgrid.Convertor_Type.COMMAND_Q', index=7,
      number=8, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1152,
  serialized_end=1309,
)
# Generated protobuf Descriptor for the top-level microgrid message: area id,
# timestamps, repeated device lists (dg/ess/pv/wp/loads/bic), grid exchange
# power PMG, DC bus voltage, command metadata, and operating COST.
# type=11 / cpp_type=10 / label=3 means "repeated embedded message"; the
# concrete message types are linked in after this Descriptor is defined.
# NOTE(review): protoc-generated -- regenerate with protoc, don't hand-edit.
_MICROGRID = _descriptor.Descriptor(
  name='microgrid',
  full_name='microgrid',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='AREA', full_name='microgrid.AREA', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='TIME_STAMP', full_name='microgrid.TIME_STAMP', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='dg', full_name='microgrid.dg', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='ess', full_name='microgrid.ess', index=3,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='pv', full_name='microgrid.pv', index=4,
      number=5, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='wp', full_name='microgrid.wp', index=5,
      number=6, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='load_ac', full_name='microgrid.load_ac', index=6,
      number=7, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='load_dc', full_name='microgrid.load_dc', index=7,
      number=8, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='bic', full_name='microgrid.bic', index=8,
      number=9, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='PMG', full_name='microgrid.PMG', index=9,
      number=10, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='V_DC', full_name='microgrid.V_DC', index=10,
      number=11, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='COMMAND_TYPE', full_name='microgrid.COMMAND_TYPE', index=11,
      number=12, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='COMMAND_TIME_STAMP', full_name='microgrid.COMMAND_TIME_STAMP', index=12,
      number=13, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='COST', full_name='microgrid.COST', index=13,
      number=14, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  # All device-record messages are declared nested inside microgrid.
  nested_types=[_MICROGRID_DGTYPE, _MICROGRID_ESSTYPE, _MICROGRID_PVTYPE, _MICROGRID_WPTYPE, _MICROGRID_LOAD_AC_TYPE, _MICROGRID_LOAD_DC_TYPE, _MICROGRID_CONVERTOR_TYPE, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=36,
  serialized_end=1309,
)
# Post-definition wiring emitted by protoc: link each nested descriptor back
# to its parent, point every repeated message field at its element type,
# register the file descriptor, then build the concrete message classes via
# the reflection metaclass.
# NOTE(review): generated code -- regenerate with protoc instead of editing.
_MICROGRID_DGTYPE.containing_type = _MICROGRID
_MICROGRID_ESSTYPE.containing_type = _MICROGRID
_MICROGRID_PVTYPE.containing_type = _MICROGRID
_MICROGRID_WPTYPE.containing_type = _MICROGRID
_MICROGRID_LOAD_AC_TYPE.containing_type = _MICROGRID
_MICROGRID_LOAD_DC_TYPE.containing_type = _MICROGRID
_MICROGRID_CONVERTOR_TYPE.containing_type = _MICROGRID
# Repeated message fields now resolve to the nested record types above.
_MICROGRID.fields_by_name['dg'].message_type = _MICROGRID_DGTYPE
_MICROGRID.fields_by_name['ess'].message_type = _MICROGRID_ESSTYPE
_MICROGRID.fields_by_name['pv'].message_type = _MICROGRID_PVTYPE
_MICROGRID.fields_by_name['wp'].message_type = _MICROGRID_WPTYPE
_MICROGRID.fields_by_name['load_ac'].message_type = _MICROGRID_LOAD_AC_TYPE
_MICROGRID.fields_by_name['load_dc'].message_type = _MICROGRID_LOAD_DC_TYPE
_MICROGRID.fields_by_name['bic'].message_type = _MICROGRID_CONVERTOR_TYPE
DESCRIPTOR.message_types_by_name['microgrid'] = _MICROGRID
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# The usable message class: `microgrid`, with one nested class per nested
# descriptor.  Each is synthesized from its Descriptor by the metaclass.
microgrid = _reflection.GeneratedProtocolMessageType('microgrid', (_message.Message,), dict(

  DgType = _reflection.GeneratedProtocolMessageType('DgType', (_message.Message,), dict(
    DESCRIPTOR = _MICROGRID_DGTYPE,
    __module__ = 'single_period_information_pb2'
    # @@protoc_insertion_point(class_scope:microgrid.DgType)
    ))
  ,

  EssType = _reflection.GeneratedProtocolMessageType('EssType', (_message.Message,), dict(
    DESCRIPTOR = _MICROGRID_ESSTYPE,
    __module__ = 'single_period_information_pb2'
    # @@protoc_insertion_point(class_scope:microgrid.EssType)
    ))
  ,

  PvType = _reflection.GeneratedProtocolMessageType('PvType', (_message.Message,), dict(
    DESCRIPTOR = _MICROGRID_PVTYPE,
    __module__ = 'single_period_information_pb2'
    # @@protoc_insertion_point(class_scope:microgrid.PvType)
    ))
  ,

  WpType = _reflection.GeneratedProtocolMessageType('WpType', (_message.Message,), dict(
    DESCRIPTOR = _MICROGRID_WPTYPE,
    __module__ = 'single_period_information_pb2'
    # @@protoc_insertion_point(class_scope:microgrid.WpType)
    ))
  ,

  Load_AC_Type = _reflection.GeneratedProtocolMessageType('Load_AC_Type', (_message.Message,), dict(
    DESCRIPTOR = _MICROGRID_LOAD_AC_TYPE,
    __module__ = 'single_period_information_pb2'
    # @@protoc_insertion_point(class_scope:microgrid.Load_AC_Type)
    ))
  ,

  Load_DC_Type = _reflection.GeneratedProtocolMessageType('Load_DC_Type', (_message.Message,), dict(
    DESCRIPTOR = _MICROGRID_LOAD_DC_TYPE,
    __module__ = 'single_period_information_pb2'
    # @@protoc_insertion_point(class_scope:microgrid.Load_DC_Type)
    ))
  ,

  Convertor_Type = _reflection.GeneratedProtocolMessageType('Convertor_Type', (_message.Message,), dict(
    DESCRIPTOR = _MICROGRID_CONVERTOR_TYPE,
    __module__ = 'single_period_information_pb2'
    # @@protoc_insertion_point(class_scope:microgrid.Convertor_Type)
    ))
  ,
  DESCRIPTOR = _MICROGRID,
  __module__ = 'single_period_information_pb2'
  # @@protoc_insertion_point(class_scope:microgrid)
  ))
# Register every class with the symbol database so it is discoverable by name.
_sym_db.RegisterMessage(microgrid)
_sym_db.RegisterMessage(microgrid.DgType)
_sym_db.RegisterMessage(microgrid.EssType)
_sym_db.RegisterMessage(microgrid.PvType)
_sym_db.RegisterMessage(microgrid.WpType)
_sym_db.RegisterMessage(microgrid.Load_AC_Type)
_sym_db.RegisterMessage(microgrid.Load_DC_Type)
_sym_db.RegisterMessage(microgrid.Convertor_Type)
# @@protoc_insertion_point(module_scope)
| 39.90566
| 2,830
| 0.757717
| 4,334
| 29,610
| 4.894093
| 0.052146
| 0.075433
| 0.057706
| 0.060346
| 0.834944
| 0.761256
| 0.733016
| 0.72222
| 0.713781
| 0.713781
| 0
| 0.05127
| 0.106788
| 29,610
| 741
| 2,831
| 39.959514
| 0.750718
| 0.02104
| 0
| 0.728959
| 1
| 0.001427
| 0.184476
| 0.143404
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008559
| 0
| 0.008559
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
25fbe05caea2fc07988ecae624b5851ed3733a86
| 67,461
|
py
|
Python
|
examples/pokekombat.py
|
corycook/jalex
|
713493bd5df9d7d275d325b64d15543c8da42e88
|
[
"MIT"
] | 2
|
2017-06-24T09:45:56.000Z
|
2019-04-28T09:20:56.000Z
|
examples/pokekombat.py
|
corycook/jalex
|
713493bd5df9d7d275d325b64d15543c8da42e88
|
[
"MIT"
] | null | null | null |
examples/pokekombat.py
|
corycook/jalex
|
713493bd5df9d7d275d325b64d15543c8da42e88
|
[
"MIT"
] | null | null | null |
# PokeKombat!!!
# The mystical Pokemon Eevee must fight his way through countless enemies to prove his worth
# Hyper beam is now swift, I feel Eevee would die if he actually used-
# Hyper beam as much as he was originally expected to.
# Add custom poke death animation so a new pokeball comes up
# Make Eevee and Psyduck not collideable just like scyther and just make
# missiles collideable
from v1shim import games, color
import math, random
# Global missile-odds tuning constant.  NOTE(review): not referenced by any
# class visible in this chunk -- confirm it is used elsewhere in the file.
Missile_Odds = 1
# Open an 840x480 window running at 50 frames per second.
games.init(screen_width=840, screen_height=480, fps=50)
class Psyduck(games.Sprite):
    """
    An evil Psyduck!!!

    First boss: patrols horizontally, randomly reversing direction and
    firing Bubble missiles.  When the shared class health HUD reaches
    zero it dies, spawning an Explosion, a Cubone, and one random
    evolution stone (water / thunder / fire).
    """
    # Both facing images are cached at class load; the original called
    # games.load_image() inside update() on every frame (per-frame disk I/O).
    image = games.load_image("psyduck.png")
    image2 = games.load_image("psyduck2.bmp")  # right-facing frame
    sound = games.load_sound("cubone.wav")
    MISSILE_DELAY = 25
    Left = False
    Right = False
    total = 0
    stone = 0
    odds = 2
    missile_odds = 100
    health = games.Text(value=100,
                        size=30,
                        color=color.blue,
                        top=5,
                        right=games.screen.width - 20,
                        is_collideable=False)
    games.screen.add(health)

    def __init__(self, game, x, y, speed=2, odds_change=200):
        """ Initialize Psyduck! """
        Psyduck.total += 1
        super(Psyduck, self).__init__(image=Psyduck.image,
                                      x=x, y=y, dx=speed)
        self.odds_change = odds_change  # 1-in-N chance per frame to turn around
        self.missile_wait = 0

    def _explode(self):
        """Spawn an explosion at the current position (cosmetic hit reaction)."""
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die(self):
        """Defeated: explode, remove self, spawn Cubone and a random stone."""
        Psyduck.total -= 1
        self._explode()
        self.destroy()
        games.screen.add(Cubone(x=self.x, y=self.y, size=9))
        # One evolution stone drops, chosen uniformly; replaces the original
        # three copy-pasted if-blocks (sound played on every roll either way).
        stone_classes = {1: Thunder_Stone, 2: Water_Stone, 3: Fire_Stone}
        stone = random.randint(1, 3)
        games.screen.add(stone_classes[stone](game=self, x=400, y=2))
        Psyduck.sound.play()

    # die2/die3/die4 are projectile hit hooks; all are cosmetic here.
    def die2(self):
        self._explode()

    def die3(self):
        self._explode()

    def die4(self):
        self._explode()

    def decrease_health(self):
        """Take 10 damage (shared class HUD counter) and flash an explosion."""
        Psyduck.health.value -= 10
        self._explode()

    def update(self):
        """IT'S ALIVEEEEE!!!!!"""
        if random.randrange(self.odds_change) == 0:
            self.odds += 1          # instance attr shadows the class default
            self.dx = -self.dx
        if random.randrange(self.missile_odds) == 0:
            games.screen.add(Bubble(self.x, self.y))
            # NOTE(review): bubble_wait is written but never read -- confirm
            # whether a cooldown check was intended here.
            self.bubble_wait = Psyduck.MISSILE_DELAY
        if self.odds % 2 == 0:
            self.set_image(Psyduck.image2)  # cached; no per-frame disk read
            Psyduck.Left = False
            Psyduck.Right = True
        else:
            self.set_image(Psyduck.image)
            Psyduck.Left = True
            Psyduck.Right = False
        # Wrap around the screen edges.
        if self.left > games.screen.width:
            self.right = 0
        if self.right < 0:
            self.left = games.screen.width
        if self.health.value <= 0:
            self.die()
class Cubone(games.Sprite):
    """
    An evil Cubone!!!

    Second boss: patrols, randomly reverses direction, and throws Bone
    missiles.  Dying spawns an Explosion and the next boss, Charizard.
    """
    # Both facing images cached at class load; the original reloaded them
    # from disk inside update() on every frame.
    image = games.load_image("cubone.png")
    image2 = games.load_image("cubone2.bmp")  # right-facing frame
    MISSILE_DELAY = 25
    Left = False
    Right = False
    total = 0
    odds = 2
    missile_odds = 100
    sound = games.load_sound("charizard.wav")
    health = games.Text(value=100,
                        size=30,
                        color=color.brown,
                        top=25,
                        right=games.screen.width - 20,
                        is_collideable=False)
    games.screen.add(health)

    def __init__(self, size, x, y, speed=2, odds_change=200):
        """ Initialize Cubone! """
        Cubone.total += 1
        super(Cubone, self).__init__(image=Cubone.image,
                                     x=x, y=y, dx=speed)
        self.odds_change = odds_change
        self.missile_wait = 0

    def _explode(self):
        """Spawn an explosion at the current position (cosmetic hit reaction)."""
        games.screen.add(Explosion(x=self.x, y=self.y))

    def decrease_health(self):
        """Take 10 damage (shared class HUD counter) and flash an explosion."""
        Cubone.health.value -= 10
        self._explode()

    def die(self):
        """Defeated: explode, remove self, and spawn Charizard."""
        Cubone.total -= 1
        self._explode()
        self.destroy()
        games.screen.add(Charizard(x=self.x, y=self.y, size=9))
        Cubone.sound.play()

    # die2/die3/die4 are projectile hit hooks; all are cosmetic here.
    def die2(self):
        self._explode()

    def die3(self):
        self._explode()

    def die4(self):
        self._explode()

    def update(self):
        """IT'S ALIVEEEEE!!!!!"""
        self.size = 9
        if random.randrange(self.odds_change) == 0:
            self.odds += 1          # instance attr shadows the class default
            self.dx = -self.dx
        if random.randrange(self.missile_odds) == 0:
            games.screen.add(Bone(self.x, self.y))
            # NOTE(review): bubble_wait is written but never read.
            self.bubble_wait = Cubone.MISSILE_DELAY
        if self.odds % 2 == 0:
            self.set_image(Cubone.image2)   # cached; no per-frame disk read
            Cubone.Left = False
            Cubone.Right = True
        else:
            self.set_image(Cubone.image)
            Cubone.Left = True
            Cubone.Right = False
        # Wrap around the screen edges.
        if self.left > games.screen.width:
            self.right = 0
        if self.right < 0:
            self.left = games.screen.width
        if self.health.value <= 0:
            self.die()
class Charizard(games.Sprite):
    """
    An evil Charizard!!!  (docstring fixed: it was copy-pasted from Cubone)

    Third boss: patrols, randomly reverses direction, and shoots
    Fireballs.  Dying spawns an Explosion and the next boss, Snorlax.
    """
    # Both facing images cached at class load; the original reloaded them
    # from disk inside update() on every frame.
    image = games.load_image("charizard.png")
    image2 = games.load_image("charizard2.bmp")  # right-facing frame
    MISSILE_DELAY = 25
    Left = False
    Right = False
    total = 0
    odds = 2
    missile_odds = 100
    sound = games.load_sound("snorlax.wav")
    health = games.Text(value=100,
                        size=30,
                        color=color.red,
                        top=45,
                        right=games.screen.width - 20,
                        is_collideable=False)
    games.screen.add(health)

    def __init__(self, size, x, y, speed=3, odds_change=100):
        """ Initialize Charizard! """
        Charizard.total += 1
        super(Charizard, self).__init__(image=Charizard.image,
                                        x=x, y=y, dx=speed)
        self.odds_change = odds_change
        self.missile_wait = 0

    def _explode(self):
        """Spawn an explosion at the current position (cosmetic hit reaction)."""
        games.screen.add(Explosion(x=self.x, y=self.y))

    def decrease_health(self):
        """Take 10 damage (shared class HUD counter) and flash an explosion."""
        Charizard.health.value -= 10
        self._explode()

    def die(self):
        """Defeated: explode, remove self, and spawn Snorlax."""
        Charizard.total -= 1
        self._explode()
        self.destroy()
        games.screen.add(Snorlax(x=self.x, y=self.y, size=9))
        Charizard.sound.play()

    # die2/die3/die4 are projectile hit hooks; all are cosmetic here.
    def die2(self):
        self._explode()

    def die3(self):
        self._explode()

    def die4(self):
        self._explode()

    def update(self):
        """IT'S ALIVEEEEE!!!!!"""
        self.size = 9
        if random.randrange(self.odds_change) == 0:
            self.odds += 1          # instance attr shadows the class default
            self.dx = -self.dx
        if random.randrange(self.missile_odds) == 0:
            games.screen.add(Fireball(self.x, self.y))
            # NOTE(review): bubble_wait is written but never read.
            self.bubble_wait = Charizard.MISSILE_DELAY
        if self.odds % 2 == 0:
            self.set_image(Charizard.image2)  # cached; no per-frame disk read
            Charizard.Left = False
            Charizard.Right = True
        else:
            self.set_image(Charizard.image)
            Charizard.Left = True
            Charizard.Right = False
        # Wrap around the screen edges.
        if self.left > games.screen.width:
            self.right = 0
        if self.right < 0:
            self.left = games.screen.width
        if self.health.value <= 0:
            self.die()
class Snorlax(games.Sprite):
    """
    An really cute Snorlax!!!

    Fourth boss: patrols, randomly jumps, and damages any sprite it
    overlaps (knocking both apart).  Dying spawns an Explosion and the
    final boss, Mewtwo.
    """
    # Both facing images cached at class load; the original reloaded them
    # from disk inside update() on every frame.
    image = games.load_image("snorlax.png")
    image2 = games.load_image("snorlax2.bmp")  # right-facing frame
    MISSILE_DELAY = 25
    Left = False
    Right = False
    total = 0
    odds = 2
    boundary1 = 0
    boundary2 = 0
    missile_odds = 100
    jump_odds = 100
    sound = games.load_sound("mewtwo.wav")
    sound2 = games.load_sound("snorlax2.wav")
    health = games.Text(value=100,
                        size=30,
                        color=color.white,
                        top=65,
                        right=games.screen.width - 20,
                        is_collideable=False)
    games.screen.add(health)

    def __init__(self, size, x, y, speed=3, odds_change=100):
        """ Initialize Snorlax! """
        Snorlax.total += 1
        super(Snorlax, self).__init__(image=Snorlax.image,
                                      x=x, y=y, dx=speed)
        self.odds_change = odds_change
        self.missile_wait = 0

    def _explode(self):
        """Spawn an explosion at the current position (cosmetic hit reaction)."""
        games.screen.add(Explosion(x=self.x, y=self.y))

    def decrease_health(self):
        """Take 10 damage (shared class HUD counter) and flash an explosion."""
        Snorlax.health.value -= 10
        self._explode()

    def die(self):
        """Defeated: explode, remove self, and spawn Mewtwo on the ground."""
        Snorlax.total -= 1
        self._explode()
        self.destroy()
        games.screen.add(Mewtwo(x=self.x, y=400, size=9))
        Snorlax.sound.play()

    # die2/die3/die4 are projectile hit hooks; all are cosmetic here.
    def die2(self):
        self._explode()

    def die3(self):
        self._explode()

    def die4(self):
        self._explode()

    def update(self):
        """IT'S ALIVEEEEE!!!!!"""
        self.size = 9
        # NOTE(review): boundary1/boundary2 are written but never read here.
        self.boundary1 = self.x + 350
        self.boundary2 = self.x - 350
        if random.randrange(self.odds_change) == 0:
            self.odds += 1          # instance attr shadows the class default
            self.dx = -self.dx
        # Random jump with a roar.
        if random.randrange(self.jump_odds) == 0:
            self.dy = -5
            Snorlax.sound2.play()
        # Simple jump arc: bounce at the top, settle back at y=400.
        if self.bottom < 350:
            self.dy = -self.dy
        if self.top > 390:
            self.dy = 0
            self.y = 400
        if self.top > 400:
            self.dy = 0
            self.y = 400
        if self.odds % 2 == 0:
            self.set_image(Snorlax.image2)  # cached; no per-frame disk read
            # BUGFIX: this branch previously set Charizard.Left/Right (a
            # copy-paste from Charizard.update); Snorlax now tracks its own
            # facing flags, which were otherwise never written.
            Snorlax.Left = False
            Snorlax.Right = True
        else:
            self.set_image(Snorlax.image)
            Snorlax.Left = True
            Snorlax.Right = False
        # Wrap around the screen edges.
        if self.left > games.screen.width:
            self.right = 0
        if self.right < 0:
            self.left = games.screen.width
        # Body-slam: damage everything we overlap and knock both sides apart.
        if self.overlapping_sprites:
            for sprite in self.overlapping_sprites:
                sprite.decrease_health()
                if sprite.x < self.x:
                    self.x += 50
                    sprite.x -= 50  # sprite falls back when hit
                else:
                    sprite.x += 50
                    self.x -= 50
        if self.health.value <= 0:
            self.die()
class Mewtwo(games.Sprite):
    """
    An evil Mewtwo!!!

    Final boss: patrols and fires Shadowballs.  Defeating it shows the
    "You Win!" message and ends the game.
    """
    # Both facing images cached at class load; the original reloaded them
    # from disk inside update() on every frame.
    image = games.load_image("mewtwo.png")
    image2 = games.load_image("mewtwo2.png")  # right-facing frame
    MISSILE_DELAY = 25
    Left = False
    Right = False
    odds = 2
    missile_odds = 50
    health = games.Text(value=150,
                        size=30,
                        color=color.purple,
                        top=85,
                        right=games.screen.width - 20,
                        is_collideable=False)
    games.screen.add(health)

    def __init__(self, size, x, y, speed=3, odds_change=100):
        """ Initialize Mewtwo! """
        super(Mewtwo, self).__init__(image=Mewtwo.image,
                                     x=x, y=y, dx=speed)
        self.odds_change = odds_change
        self.missile_wait = 0

    def win(self):
        """Ends game: show "You Win!" for five seconds, then quit."""
        end_message = games.Message(value="You Win!",
                                    size=90,
                                    color=color.blue,
                                    x=games.screen.width / 2,
                                    y=games.screen.height / 2,
                                    lifetime=5 * games.screen.fps,
                                    after_death=games.screen.quit,
                                    is_collideable=False)
        games.screen.add(end_message)

    def _explode(self):
        """Spawn an explosion at the current position (cosmetic hit reaction)."""
        games.screen.add(Explosion(x=self.x, y=self.y))

    def decrease_health(self):
        """Take 10 damage (shared class HUD counter) and flash an explosion."""
        Mewtwo.health.value -= 10
        self._explode()

    def die(self):
        """Defeated: explode, remove self, and declare victory."""
        self._explode()
        self.destroy()
        self.win()

    # die2/die3/die4 are projectile hit hooks; all are cosmetic here.
    def die2(self):
        self._explode()

    def die3(self):
        self._explode()

    def die4(self):
        self._explode()

    def update(self):
        """IT'S ALIVEEEEE!!!!!"""
        self.size = 9
        if random.randrange(self.odds_change) == 0:
            self.odds += 1          # instance attr shadows the class default
            self.dx = -self.dx
        if random.randrange(self.missile_odds) == 0:
            games.screen.add(Shadowball(self.x, self.y))
            # NOTE(review): bubble_wait is written but never read.
            self.bubble_wait = Mewtwo.MISSILE_DELAY
        if self.odds % 2 == 0:
            self.set_image(Mewtwo.image2)   # cached; no per-frame disk read
            Mewtwo.Left = False
            Mewtwo.Right = True
        else:
            self.set_image(Mewtwo.image)
            Mewtwo.Left = True
            Mewtwo.Right = False
        # Wrap around the screen edges.
        if self.left > games.screen.width:
            self.right = 0
        if self.right < 0:
            self.left = games.screen.width
        if self.health.value <= 0:
            self.die()
class Eevee(games.Sprite):
    """
    An Eevee that is ready to fight!

    The player's sprite: left/right arrows move, up jumps, spacebar
    fires a Missile.  die2..die5 are evolution hooks (triggered by the
    stones, or by the 1+5+0 cheat for die5) that swap Eevee out for
    Vaporeon, Jolteon, Flareon, or Mew; plain die() ends the game.
    """
    # Both facing images cached at class load; the original re-read them
    # from disk on every frame a movement key was held.
    image = games.load_image("eevee.png")
    image2 = games.load_image("eevee2.bmp")  # left-facing frame
    sound = games.load_sound("eevee3.wav")
    soundend = games.load_sound("gameover.wav")
    MISSILE_DELAY = 25
    JUMP_DELAY = 50
    Left = False
    Right = False
    # Evolution stage: 0 Eevee, 1 Vaporeon, 2 Jolteon, 3 Flareon, 4 Mew.
    total = 0
    health = games.Text(value=100,
                        size=30,
                        color=color.red,
                        top=5,
                        right=games.screen.width - 710,
                        is_collideable=False)
    games.screen.add(health)
    stone = games.Text(value="Eevee",
                       size=30,
                       color=color.white,
                       top=5,
                       right=games.screen.width - 745,
                       is_collideable=False)
    games.screen.add(stone)

    def __init__(self, x, y):
        """ Initialize Eevee like a boss, and keeps his score."""
        super(Eevee, self).__init__(image=Eevee.image,
                                    x=x, y=y)
        self.missile_wait = 0
        self.jump_wait = 0

    def end(self):
        """Ends game: show "Game Over" for five seconds, then quit."""
        end_message = games.Message(value="Game Over",
                                    size=90,
                                    color=color.red,
                                    x=games.screen.width / 2,
                                    y=games.screen.height / 2,
                                    lifetime=5 * games.screen.fps,
                                    after_death=games.screen.quit,
                                    is_collideable=False)
        Eevee.soundend.play()
        games.screen.add(end_message)

    def die(self):
        """Out of health: explode and end the game."""
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()
        self.end()

    def die2(self):
        """Water Stone: evolve into Vaporeon (tearing down this sprite's
        HUD and the unused Jolteon/Flareon HUD texts first)."""
        Eevee.total = 1
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()
        self.health.destroy()
        self.stone.destroy()
        Jolteon.health.destroy()
        Flareon.health.destroy()
        Jolteon.stone.destroy()
        Flareon.stone.destroy()
        # NOTE(review): spawns at fixed x=400, while die3/die4 reuse
        # self.x -- confirm the asymmetry is intended.
        games.screen.add(Vaporeon(x=400, y=400))

    def die3(self):
        """Thunder Stone: evolve into Jolteon."""
        Eevee.total = 2
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()
        self.health.destroy()
        self.stone.destroy()
        Vaporeon.health.destroy()
        Flareon.health.destroy()
        Vaporeon.stone.destroy()
        Flareon.stone.destroy()
        games.screen.add(Jolteon(x=self.x, y=400))

    def die4(self):
        """Fire Stone: evolve into Flareon."""
        Eevee.total = 3
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()
        self.health.destroy()
        self.stone.destroy()
        Vaporeon.health.destroy()
        Jolteon.health.destroy()
        Vaporeon.stone.destroy()
        Jolteon.stone.destroy()
        games.screen.add(Flareon(x=self.x, y=400))

    def die5(self):
        """Cheat evolution: become Mew."""
        Eevee.total = 4
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()
        self.health.destroy()
        self.stone.destroy()
        games.screen.add(Mew(x=self.x, y=400))

    def decrease_health(self):
        """Take 10 damage (shared class HUD counter) and flash an explosion."""
        Eevee.health.value -= 10
        games.screen.add(Explosion(x=self.x, y=self.y))

    def water_evolve(self):
        """Update the HUD species label after a water evolution."""
        # BUGFIX: was the misspelling "Vaporean"; the Vaporeon class's own
        # HUD text reads "Vaporeon".
        Eevee.stone.value = "Vaporeon"

    def update(self):
        """Move and jump around! Just like Eevee in real life!!!"""
        if games.keyboard.is_pressed(games.K_LEFT):
            self.set_image(Eevee.image2)  # cached; no per-frame disk read
            Eevee.Left = True
            Eevee.Right = False
            self.x -= 2
        if games.keyboard.is_pressed(games.K_RIGHT):
            self.set_image(Eevee.image)
            Eevee.Left = False
            Eevee.Right = True
            self.x += 2
        # Cheat code: hold 1 + 5 + 0 together to jump straight to Mew.
        if (games.keyboard.is_pressed(games.K_1)
                and games.keyboard.is_pressed(games.K_5)
                and games.keyboard.is_pressed(games.K_0)):
            self.die5()
        if games.keyboard.is_pressed(games.K_UP) and self.jump_wait == 0:
            Eevee.sound.play()
            self.jump_wait = Eevee.JUMP_DELAY
            self.dy = -4
        # Simple jump arc: bounce at the top, settle back at y=400.
        if self.bottom < 350:
            self.dy = -self.dy
        if self.top > 360:
            self.dy = 0
            self.y = 400
        if self.health.value == 0:
            self.die()
        # Wraps Eevee around screen lol
        if self.left > games.screen.width:
            self.right = 0
        if self.right < 0:
            self.left = games.screen.width
        # If waiting until eevee can use Hyper Beam again decrease wait.
        if self.missile_wait > 0:
            self.missile_wait -= 1
        # If waiting until eevee can jump again decrease wait.
        if self.jump_wait > 0:
            self.jump_wait -= 1
        # Use Hyper Beam if spacebar pressed and missile wait is over.
        if games.keyboard.is_pressed(games.K_SPACE) and self.missile_wait == 0:
            games.screen.add(Missile(self.x, self.y))
            self.missile_wait = Eevee.MISSILE_DELAY
class Vaporeon(games.Sprite):
    """
    A Vaporeon that is ready to fight!

    Player sprite after the water evolution: same controls as Eevee but
    fires Icebeam instead of Missile.  die3/die4 re-evolve into Jolteon
    or Flareon; die2 is only a cosmetic explosion.
    """
    # Both facing images cached at class load; the original re-read them
    # from disk on every frame a movement key was held.
    image = games.load_image("vaporeon.png")
    image2 = games.load_image("vaporeon2.bmp")  # left-facing frame
    sound = games.load_sound("vaporeon.wav")
    soundend = games.load_sound("gameover.wav")
    Left = False
    Right = False
    MISSILE_DELAY = 25
    JUMP_DELAY = 50
    total = 0
    health = games.Text(value=100,
                        size=30,
                        color=color.white,
                        top=65,
                        right=games.screen.width - 710,
                        is_collideable=False)
    games.screen.add(health)
    stone = games.Text(value="Vaporeon",
                       size=30,
                       color=color.blue,
                       top=65,
                       right=games.screen.width - 745,
                       is_collideable=False)
    games.screen.add(stone)

    def __init__(self, x, y):
        """ Initialize Vaporeon like a boss, and keeps his score."""
        super(Vaporeon, self).__init__(image=Vaporeon.image,
                                       x=x, y=y)
        self.missile_wait = 0
        self.jump_wait = 0

    def end(self):
        """Ends game: show "Game Over" for five seconds, then quit."""
        end_message = games.Message(value="Game Over",
                                    size=90,
                                    color=color.red,
                                    x=games.screen.width / 2,
                                    y=games.screen.height / 2,
                                    lifetime=5 * games.screen.fps,
                                    after_death=games.screen.quit,
                                    is_collideable=False)
        Vaporeon.soundend.play()
        games.screen.add(end_message)

    def die(self):
        """Out of health: explode and end the game."""
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()
        self.end()

    def die2(self):
        """Water Stone again: cosmetic explosion only (already water type)."""
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die3(self):
        """Thunder Stone: re-evolve into Jolteon."""
        Eevee.total = 2
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()
        games.screen.add(Jolteon(x=self.x, y=self.y))

    def die4(self):
        """Fire Stone: re-evolve into Flareon."""
        Eevee.total = 3
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()
        games.screen.add(Flareon(x=self.x, y=self.y))

    def decrease_health(self):
        """Take 10 damage (shared class HUD counter) and flash an explosion."""
        Vaporeon.health.value -= 10
        games.screen.add(Explosion(x=self.x, y=self.y))

    def water_evolve(self):
        """Update the HUD species label after a water evolution."""
        # BUGFIX: was the misspelling "Vaporean"; matches the HUD text above.
        Vaporeon.stone.value = "Vaporeon"

    def update(self):
        """Move and jump around! Just like Eevee in real life!!!"""
        if games.keyboard.is_pressed(games.K_LEFT):
            self.set_image(Vaporeon.image2)  # cached; no per-frame disk read
            Vaporeon.Left = True
            Vaporeon.Right = False
            self.x -= 2
        if games.keyboard.is_pressed(games.K_RIGHT):
            self.set_image(Vaporeon.image)
            Vaporeon.Left = False
            Vaporeon.Right = True
            self.x += 2
        if games.keyboard.is_pressed(games.K_UP) and self.jump_wait == 0:
            Vaporeon.sound.play()
            self.jump_wait = Vaporeon.JUMP_DELAY
            self.dy = -4
        # Simple jump arc: bounce at the top, settle back at y=400.
        if self.bottom < 350:
            self.dy = -self.dy
        if self.top > 390:
            self.dy = 0
            self.y = 400
        if self.top > 400:
            self.dy = 0
            self.y = 400
        if self.health.value == 0:
            self.die()
        # Wraps Vaporeon around screen lol
        if self.left > games.screen.width:
            self.right = 0
        if self.right < 0:
            self.left = games.screen.width
        # If waiting until the next Icebeam is allowed, decrease wait.
        if self.missile_wait > 0:
            self.missile_wait -= 1
        # If waiting until the next jump is allowed, decrease wait.
        if self.jump_wait > 0:
            self.jump_wait -= 1
        # Fire an Icebeam if spacebar pressed and the cooldown is over.
        if games.keyboard.is_pressed(games.K_SPACE) and self.missile_wait == 0:
            games.screen.add(Icebeam(self.x, self.y))
            self.missile_wait = Vaporeon.MISSILE_DELAY
class Jolteon(games.Sprite):
    """
    A Jolteon that is ready to fight!

    Electric-type evolution of the player sprite: walks, jumps, wraps
    around the screen edges and fires Lightning projectiles.  The HUD Text
    sprites are created — and added to the screen — at class definition
    time, so they are shared by all instances.
    """
    image = games.load_image("jolteon.bmp")
    sound = games.load_sound("jolteon.wav")
    soundend = games.load_sound("gameover.wav")
    # Facing flags read by the Lightning projectile to pick its direction.
    Left = False
    Right = False
    MISSILE_DELAY = 25   # frames between Lightning shots
    JUMP_DELAY = 50      # frames between jumps
    total = 0
    # Lazily-cached directional images.  The original reloaded the image
    # from disk on every frame a direction key was held.
    _image_left = None
    _image_right = None
    health = games.Text(value=100,
                        size=30,
                        color=color.yellow,
                        top=85,
                        right=games.screen.width - 710,
                        is_collideable=False)
    games.screen.add(health)
    stone = games.Text(value="Jolteon",
                       size=30,
                       color=color.yellow,
                       top=85,
                       right=games.screen.width - 745,
                       is_collideable=False)
    games.screen.add(stone)

    def __init__(self, x, y):
        """Initialize Jolteon at (x, y) with both cooldowns ready."""
        super(Jolteon, self).__init__(image=Jolteon.image,
                                      x=x, y=y)
        self.missile_wait = 0
        self.jump_wait = 0

    def end(self):
        """Show 'Game Over' for 5 seconds, then quit the screen."""
        end_message = games.Message(value="Game Over",
                                    size=90,
                                    color=color.red,
                                    x=games.screen.width / 2,
                                    y=games.screen.height / 2,
                                    lifetime=5 * games.screen.fps,
                                    after_death=games.screen.quit,
                                    is_collideable=False)
        # Fix: the original played Vaporeon.soundend here; use our own
        # (same "gameover.wav" asset, but no cross-class dependency).
        Jolteon.soundend.play()
        games.screen.add(end_message)

    def die(self):
        """Explode, disappear, and end the game."""
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()
        self.end()

    def die2(self):
        """Water stone: replace this sprite with a Vaporeon."""
        Eevee.total = 1
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()
        games.screen.add(Vaporeon(x=self.x, y=self.y))

    def die3(self):
        # Thunder stone used on an already electric-evolved sprite:
        # just an explosion, no further evolution.
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die4(self):
        """Fire stone: replace this sprite with a Flareon."""
        Eevee.total = 3
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()
        games.screen.add(Flareon(x=self.x, y=self.y))

    def decrease_health(self):
        """Take 10 damage (shared HUD counter) and flash an explosion."""
        Jolteon.health.value -= 10
        games.screen.add(Explosion(x=self.x, y=self.y))

    def water_evolve(self):
        # NOTE(review): named water_evolve for interface parity with the
        # other evolutions, but this is the electric form's HUD label.
        Jolteon.stone.value = "Jolteon"

    def update(self):
        """Move and jump around! Just like Eevee in real life!!!"""
        if games.keyboard.is_pressed(games.K_LEFT):
            # Load the left-facing image once and reuse it afterwards.
            if Jolteon._image_left is None:
                Jolteon._image_left = games.load_image("jolteon2.bmp")
            self.set_image(Jolteon._image_left)
            Jolteon.Left = True
            Jolteon.Right = False
            self.x -= 2
        if games.keyboard.is_pressed(games.K_RIGHT):
            if Jolteon._image_right is None:
                Jolteon._image_right = games.load_image("jolteon.bmp")
            self.set_image(Jolteon._image_right)
            Jolteon.Left = False
            Jolteon.Right = True
            self.x += 2
        if games.keyboard.is_pressed(games.K_UP) and self.jump_wait == 0:
            Jolteon.sound.play()
            self.jump_wait = Jolteon.JUMP_DELAY
            self.dy = -4
        # Bounce at the jump ceiling, then settle back onto the floor.
        if self.bottom < 350:
            self.dy = -self.dy
        if self.top > 390:
            self.dy = 0
            self.y = 400
        if self.top > 400:
            self.dy = 0
            self.y = 400
        if self.health.value == 0:
            self.die()
        # Wrap around the screen edges.
        if self.left > games.screen.width:
            self.right = 0
        if self.right < 0:
            self.left = games.screen.width
        # Tick down the attack and jump cooldowns.
        if self.missile_wait > 0:
            self.missile_wait -= 1
        if self.jump_wait > 0:
            self.jump_wait -= 1
        # Fire a Lightning bolt when spacebar is pressed and cooldown is over.
        if games.keyboard.is_pressed(games.K_SPACE) and self.missile_wait == 0:
            games.screen.add(Lightning(self.x, self.y))
            self.missile_wait = Jolteon.MISSILE_DELAY
class Flareon(games.Sprite):
    """
    A Flareon that is ready to fight!

    Fire-type evolution of the player sprite: walks, jumps, wraps around
    the screen edges and fires Fireball2 projectiles.  The HUD Text
    sprites are created — and added to the screen — at class definition
    time, so they are shared by all instances.
    """
    image = games.load_image("flareon.bmp")
    sound = games.load_sound("flareon.wav")
    soundend = games.load_sound("gameover.wav")
    # Facing flags read by the Fireball2 projectile to pick its direction.
    Left = False
    Right = False
    MISSILE_DELAY = 25   # frames between fireball shots
    JUMP_DELAY = 50      # frames between jumps
    total = 0
    # Lazily-cached directional images.  The original reloaded the image
    # from disk on every frame a direction key was held.
    _image_left = None
    _image_right = None
    health = games.Text(value=100,
                        size=30,
                        color=color.red,
                        top=105,
                        right=games.screen.width - 710,
                        is_collideable=False)
    games.screen.add(health)
    stone = games.Text(value="Flareon",
                       size=30,
                       color=color.red,
                       top=105,
                       right=games.screen.width - 745,
                       is_collideable=False)
    games.screen.add(stone)

    def __init__(self, x, y):
        """Initialize Flareon at (x, y) with both cooldowns ready."""
        super(Flareon, self).__init__(image=Flareon.image,
                                      x=x, y=y)
        self.missile_wait = 0
        self.jump_wait = 0

    def end(self):
        """Show 'Game Over' for 5 seconds, then quit the screen."""
        end_message = games.Message(value="Game Over",
                                    size=90,
                                    color=color.red,
                                    x=games.screen.width / 2,
                                    y=games.screen.height / 2,
                                    lifetime=5 * games.screen.fps,
                                    after_death=games.screen.quit,
                                    is_collideable=False)
        Flareon.soundend.play()
        games.screen.add(end_message)

    def die(self):
        """Explode, disappear, and end the game."""
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()
        self.end()

    def die2(self):
        """Water stone: replace this sprite with a Vaporeon."""
        Eevee.total = 1
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()
        games.screen.add(Vaporeon(x=self.x, y=self.y))

    def die3(self):
        """Thunder stone: replace this sprite with a Jolteon."""
        Eevee.total = 2
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()
        games.screen.add(Jolteon(x=self.x, y=self.y))

    def die4(self):
        # Fire stone used on an already fire-evolved sprite:
        # just an explosion, no further evolution.
        games.screen.add(Explosion(x=self.x, y=self.y))

    def decrease_health(self):
        """Take 10 damage (shared HUD counter) and flash an explosion."""
        Flareon.health.value -= 10
        games.screen.add(Explosion(x=self.x, y=self.y))

    def water_evolve(self):
        # NOTE(review): named water_evolve for interface parity with the
        # other evolutions, but this is the fire form's HUD label.
        Flareon.stone.value = "Flareon"

    def update(self):
        """Move and jump around! Just like Eevee in real life!!!"""
        if games.keyboard.is_pressed(games.K_LEFT):
            # Load the left-facing image once and reuse it afterwards.
            if Flareon._image_left is None:
                Flareon._image_left = games.load_image("flareon2.bmp")
            self.set_image(Flareon._image_left)
            Flareon.Left = True
            Flareon.Right = False
            self.x -= 2
        if games.keyboard.is_pressed(games.K_RIGHT):
            if Flareon._image_right is None:
                Flareon._image_right = games.load_image("flareon.bmp")
            self.set_image(Flareon._image_right)
            Flareon.Left = False
            Flareon.Right = True
            self.x += 2
        if games.keyboard.is_pressed(games.K_UP) and self.jump_wait == 0:
            Flareon.sound.play()
            self.jump_wait = Flareon.JUMP_DELAY
            self.dy = -4
        # Bounce at the jump ceiling, then settle back onto the floor.
        if self.bottom < 350:
            self.dy = -self.dy
        if self.top > 390:
            self.dy = 0
            self.y = 400
        if self.top > 400:
            self.dy = 0
            self.y = 400
        if self.health.value == 0:
            self.die()
        # Wrap around the screen edges.
        if self.left > games.screen.width:
            self.right = 0
        if self.right < 0:
            self.left = games.screen.width
        # Tick down the attack and jump cooldowns.
        if self.missile_wait > 0:
            self.missile_wait -= 1
        if self.jump_wait > 0:
            self.jump_wait -= 1
        # Fire a fireball when spacebar is pressed and the cooldown is over.
        if games.keyboard.is_pressed(games.K_SPACE) and self.missile_wait == 0:
            games.screen.add(Fireball2(self.x, self.y))
            self.missile_wait = Flareon.MISSILE_DELAY
class Mew(games.Sprite):
    """
    A Mew that is ready to fight!

    Final player form: flies freely in all four directions (no jump
    cooldown or gravity), has a huge health pool, and fires Psychic
    projectiles.  Unlike the evolutions, it has no HUD Text sprites.
    """
    image = games.load_image("mew.png")
    #sound = games.load_sound("mew.wav")
    soundend = games.load_sound("gameover.wav")
    # Facing flags read by the Psychic projectile to pick its direction.
    Left = False
    Right = False
    MISSILE_DELAY = 25   # frames between Psychic shots
    JUMP_DELAY = 50
    total = 0
    # NOTE(review): health is a plain class attribute, so damage is shared
    # by every Mew instance — fine while there is only one player Mew.
    health = 9999
    # Lazily-cached directional images.  The original reloaded the image
    # from disk on every frame a direction key was held.
    _image_left = None
    _image_right = None

    def __init__(self, x, y):
        """Initialize Mew at (x, y) with the attack cooldown ready."""
        super(Mew, self).__init__(image=Mew.image,
                                  x=x, y=y)
        self.missile_wait = 0
        self.jump_wait = 0

    def end(self):
        """Show 'Game Over' for 5 seconds, then quit the screen."""
        end_message = games.Message(value="Game Over",
                                    size=90,
                                    color=color.red,
                                    x=games.screen.width / 2,
                                    y=games.screen.height / 2,
                                    lifetime=5 * games.screen.fps,
                                    after_death=games.screen.quit,
                                    is_collideable=False)
        # Fix: the original played Flareon.soundend here; use our own
        # (same "gameover.wav" asset, but no cross-class dependency).
        Mew.soundend.play()
        games.screen.add(end_message)

    def die(self):
        """Explode, disappear, and end the game."""
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()
        self.end()

    def die2(self):
        """Water stone: replace this sprite with a Vaporeon on the floor."""
        Eevee.total = 1
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()
        games.screen.add(Vaporeon(x=self.x, y=400))

    def die3(self):
        """Thunder stone: replace this sprite with a Jolteon on the floor."""
        Eevee.total = 2
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()
        games.screen.add(Jolteon(x=self.x, y=400))

    def die4(self):
        """Fire stone: replace this sprite with a Flareon on the floor."""
        # NOTE(review): the explosion is drawn at y=400 (the floor), not at
        # Mew's own y — preserved from the original; confirm intent.
        games.screen.add(Explosion(x=self.x, y=400))
        self.destroy()
        games.screen.add(Flareon(x=self.x, y=400))

    def decrease_health(self):
        """Take 10 damage (class-level counter) and flash an explosion."""
        Mew.health -= 10
        games.screen.add(Explosion(x=self.x, y=self.y))

    def update(self):
        """Fly freely in any direction; fire Psychic with the spacebar."""
        if games.keyboard.is_pressed(games.K_LEFT):
            # Load the left-facing image once and reuse it afterwards.
            if Mew._image_left is None:
                Mew._image_left = games.load_image("mew2.png")
            self.set_image(Mew._image_left)
            Mew.Left = True
            Mew.Right = False
            self.dx = -2
        if games.keyboard.is_pressed(games.K_RIGHT):
            if Mew._image_right is None:
                Mew._image_right = games.load_image("mew.png")
            self.set_image(Mew._image_right)
            Mew.Left = False
            Mew.Right = True
            self.dx = 2
        if games.keyboard.is_pressed(games.K_UP):
            self.dy = -2
        if games.keyboard.is_pressed(games.K_DOWN):
            self.dy = 2
        # Opposite keys cancel each other out.
        if games.keyboard.is_pressed(games.K_LEFT) and games.keyboard.is_pressed(games.K_RIGHT):
            self.dy = 0
        if games.keyboard.is_pressed(games.K_DOWN) and games.keyboard.is_pressed(games.K_UP):
            self.dx = 0
        if self.health <= 0:
            self.die()
        # Wrap around the screen edges.
        if self.left > games.screen.width:
            self.right = 0
        if self.right < 0:
            self.left = games.screen.width
        # Tick down the attack cooldown.
        if self.missile_wait > 0:
            self.missile_wait -= 1
        # Fire a Psychic when spacebar is pressed and the cooldown is over.
        if games.keyboard.is_pressed(games.K_SPACE) and self.missile_wait == 0:
            games.screen.add(Psychic(self.x, self.y))
            self.missile_wait = Mew.MISSILE_DELAY
class Water_Stone(games.Sprite):
    """Falling evolution stone: evolves whatever it touches via die2()."""
    image = games.load_image("waterstone.bmp")
    total = 0
    health = 10

    def __init__(self, game, x, y, speed=1):
        """Initialize the stone at (x, y), falling `speed` px per frame.

        `game` is accepted for interface parity with the bosses but unused.
        (The original also assigned dead locals here — `x = 200,` was
        accidentally a one-element tuple — removed with no behavior change.)
        """
        super(Water_Stone, self).__init__(image=Water_Stone.image,
                                          x=x, y=y, dy=speed)

    def die(self):
        """Use the stone: water-evolve overlapping sprites and vanish."""
        Water_Stone.total += 1
        for sprite in self.overlapping_sprites:
            sprite.die2()
        self.destroy()

    def decrease_health(self):
        self.health -= 10

    def update(self):
        # NOTE(review): the original assigned dead locals (`dy = 0,` and
        # `y = 150`) when self.top < 150; stopping the stone would require
        # self.dy / self.y.  Removed as a no-op to preserve behavior —
        # confirm whether a "stop near the top" feature was intended.
        if self.overlapping_sprites:
            for sprite in self.overlapping_sprites:
                sprite.die2()
            self.destroy()
class Thunder_Stone(games.Sprite):
    """Falling evolution stone: evolves whatever it touches via die3()."""
    image = games.load_image("thunderstone.bmp")
    total = 0
    health = 10

    def __init__(self, game, x, y, speed=1):
        """Initialize the stone at (x, y), falling `speed` px per frame.

        `game` is accepted for interface parity with the bosses but unused.
        (Dead local assignments from the original removed — no behavior
        change.)
        """
        super(Thunder_Stone, self).__init__(image=Thunder_Stone.image,
                                            x=x, y=y, dy=speed)

    def die(self):
        """Use the stone: thunder-evolve overlapping sprites and vanish."""
        # NOTE(review): counts up by 2 per use (Water_Stone uses 1,
        # Fire_Stone 3) — preserved; confirm whether this is intentional.
        Thunder_Stone.total += 2
        for sprite in self.overlapping_sprites:
            sprite.die3()
        self.destroy()

    def decrease_health(self):
        self.health -= 10

    def update(self):
        # NOTE(review): the original's dead `dy = 0,` / `y = 150` locals
        # (a probable self.dy / self.y bug) removed as a no-op.
        if self.overlapping_sprites:
            for sprite in self.overlapping_sprites:
                sprite.die3()
            self.destroy()
class Fire_Stone(games.Sprite):
    """Falling evolution stone: evolves whatever it touches via die4()."""
    image = games.load_image("firestone.bmp")
    total = 0
    health = 10

    def __init__(self, game, x, y, speed=1):
        """Initialize the stone at (x, y), falling `speed` px per frame.

        `game` is accepted for interface parity with the bosses but unused.
        (Dead local assignments from the original removed — no behavior
        change.)
        """
        super(Fire_Stone, self).__init__(image=Fire_Stone.image,
                                         x=x, y=y, dy=speed)

    def die(self):
        """Use the stone: fire-evolve overlapping sprites and vanish."""
        # NOTE(review): counts up by 3 per use (Water_Stone uses 1,
        # Thunder_Stone 2) — preserved; confirm whether this is intentional.
        Fire_Stone.total += 3
        for sprite in self.overlapping_sprites:
            sprite.die4()
        self.destroy()

    def decrease_health(self):
        self.health -= 10

    def update(self):
        # NOTE(review): the original's dead `dy = 0,` / `y = 150` locals
        # (a probable self.dy / self.y bug) removed as a no-op.
        if self.overlapping_sprites:
            for sprite in self.overlapping_sprites:
                sprite.die4()
            self.destroy()
class Missile(games.Sprite):
    """Eevee's Hyper Beam projectile: flies horizontally, knocks back."""
    image = games.load_image("missile.bmp")
    VELOCITY_FACTOR = 5
    LIFETIME = 40
    BUFFER = 0
    health = 10

    def __init__(self, Eevee_x, Eevee_y):
        """Spawn 100 px in front of Eevee, moving in its facing direction.

        (The original re-assigned dead local dx/dy/x values after the
        super() call; they had no effect and were removed.)
        """
        if Eevee.Right == True:
            x = Eevee_x + 100
            dx = 5
            facing_image = "missile.bmp"
        else:
            x = Eevee_x - 100
            dx = -5
            facing_image = "missile2.bmp"
        super(Missile, self).__init__(image=Missile.image,
                                      x=x, y=Eevee_y,
                                      dx=dx, dy=0)
        self.lifetime = Missile.LIFETIME
        # Swap in the artwork for the direction of travel.
        self.set_image(games.load_image(facing_image))

    def die(self):
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()

    def die2(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die3(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die4(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def decrease_health(self):
        self.health -= 10

    def update(self):
        """Move the Hyperbeam and damage anything it overlaps."""
        super(Missile, self).update()
        # NOTE(review): lifetime is decremented but never checked, so the
        # beam never expires on its own — preserved; confirm intent.
        self.lifetime -= 1
        if self.health == 0:
            self.die()
        if self.overlapping_sprites:
            for sprite in self.overlapping_sprites:
                sprite.decrease_health()
                # Knock the target away from the point of impact.
                if sprite.x < self.x:
                    sprite.x -= 25
                else:
                    sprite.x += 25
            self.destroy()
class Bubble(games.Sprite):
    """Psyduck's bubble projectile: flies horizontally, knocks back."""
    image = games.load_image("bubbles.png")
    VELOCITY_FACTOR = 5
    LIFETIME = 40
    health = 10

    def __init__(self, Psyduck_x, Psyduck_y):
        """Spawn 100 px in front of Psyduck in its facing direction.

        Fix: the original left dx/dy unbound (UnboundLocalError) when
        neither Psyduck.Right nor Psyduck.Left was set; default to a
        motionless bubble at Psyduck's position instead.
        """
        x = Psyduck_x
        y = Psyduck_y
        dx = 0
        dy = 0
        if Psyduck.Right == True:
            x = Psyduck_x + 100
            dx = 5
        if Psyduck.Left == True:
            x = Psyduck_x - 100
            dx = -5
        super(Bubble, self).__init__(image=Bubble.image,
                                     x=x, y=y,
                                     dx=dx, dy=dy)
        # Directional artwork: anything other than "facing right" gets the
        # left-facing image, matching the original's else branch.
        if Psyduck.Right == True:
            self.set_image(games.load_image("bubbles.png"))
        else:
            self.set_image(games.load_image("bubbles2.png"))

    def die(self):
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()

    def die2(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die3(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die4(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def decrease_health(self):
        self.health -= 10
        # NOTE(review): lifetime is only (re)set when the bubble takes a
        # hit, and update() never reads it — preserved as-is.
        self.lifetime = Bubble.LIFETIME

    def update(self):
        """Damage and knock back anything the bubble overlaps."""
        #super(Bubble, self).update()
        if self.health == 0:
            self.die()
        if self.overlapping_sprites:
            for sprite in self.overlapping_sprites:
                sprite.decrease_health()
                # Knock the target away from the point of impact.
                if sprite.x < self.x:
                    sprite.x -= 25
                else:
                    sprite.x += 25
            self.destroy()
class Bone(games.Animation):
    """Cubone's spinning bone projectile (two-frame animation)."""
    # Fix: the original had a doubled `images = images = [...]` assignment.
    # Six frames of each image give the spin its timing.
    images = ["bone.bmp"] * 6 + ["bone2.bmp"] * 6
    VELOCITY_FACTOR = 5
    LIFETIME = 40
    BUFFER = 0
    health = 10

    def __init__(self, Cubone_x, Cubone_y):
        """Spawn 100 px in front of Cubone, moving in its facing direction."""
        if Cubone.Right == True:
            x = Cubone_x + 100
            dx = 5
        else:
            x = Cubone_x - 100
            dx = -5
        super(Bone, self).__init__(images=Bone.images,
                                   x=x, y=Cubone_y,
                                   dx=dx, dy=0)
        self.lifetime = Bone.LIFETIME

    def die(self):
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()

    def die2(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die3(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die4(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def decrease_health(self):
        self.health -= 10

    def update(self):
        """Move the bone and damage anything it overlaps."""
        super(Bone, self).update()
        # NOTE(review): lifetime is decremented but never checked, so the
        # bone never expires on its own — preserved; confirm intent.
        self.lifetime -= 1
        if self.health == 0:
            self.die()
        if self.overlapping_sprites:
            for sprite in self.overlapping_sprites:
                sprite.decrease_health()
                # Knock the target away from the point of impact.
                if sprite.x < self.x:
                    sprite.x -= 25
                else:
                    sprite.x += 25
            self.destroy()
class Fireball(games.Sprite):
    """Charizard's fireball projectile: flies horizontally, knocks back."""
    image = games.load_image("fireball.bmp")
    VELOCITY_FACTOR = 5
    LIFETIME = 40
    health = 10

    def __init__(self, Charizard_x, Charizard_y):
        """Spawn 100 px in front of Charizard in its facing direction.

        (The original re-assigned dead local dx/dy/x values after the
        super() call; they had no effect and were removed.)
        """
        if Charizard.Right == True:
            x = Charizard_x + 100
            dx = 5
            facing_image = "fireball2.bmp"
        else:
            x = Charizard_x - 100
            dx = -5
            facing_image = "fireball.bmp"
        super(Fireball, self).__init__(image=Fireball.image,
                                       x=x, y=Charizard_y,
                                       dx=dx, dy=0)
        # Swap in the artwork for the direction of travel.
        self.set_image(games.load_image(facing_image))

    def die(self):
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()

    def die2(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die3(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die4(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def decrease_health(self):
        self.health -= 10
        # NOTE(review): lifetime is only (re)set when the fireball takes a
        # hit, and update() never reads it — preserved as-is.
        self.lifetime = Fireball.LIFETIME

    def update(self):
        """Damage and knock back anything the fireball overlaps."""
        #super(Fireball, self).update()
        if self.health == 0:
            self.die()
        if self.overlapping_sprites:
            for sprite in self.overlapping_sprites:
                sprite.decrease_health()
                # Knock the target away from the point of impact.
                if sprite.x < self.x:
                    sprite.x -= 25
                else:
                    sprite.x += 25
            self.destroy()
class Shadowball(games.Sprite):
    """Mewtwo's shadow-ball projectile: travels horizontally and knocks
    back whatever it hits."""
    image = games.load_image("shadowball.bmp")
    VELOCITY_FACTOR = 5
    LIFETIME = 40
    health = 10

    def __init__(self, Mewtwo_x, Mewtwo_y):
        """Spawn 100 px in front of Mewtwo, moving in its facing direction."""
        if Mewtwo.Right == True:
            spawn_x = Mewtwo_x + 100
            velocity = 5
        else:
            spawn_x = Mewtwo_x - 100
            velocity = -5
        super(Shadowball, self).__init__(image=Shadowball.image,
                                         x=spawn_x, y=Mewtwo_y,
                                         dx=velocity, dy=0)

    def die(self):
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()

    def die2(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die3(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die4(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def decrease_health(self):
        self.health -= 10
        self.lifetime = Shadowball.LIFETIME

    def update(self):
        """Damage and knock back anything the ball overlaps, then vanish."""
        #super(Shadowball, self).update()
        if self.health == 0:
            self.die()
        if self.overlapping_sprites:
            for target in self.overlapping_sprites:
                target.decrease_health()
                # Push the target away from the point of impact.
                if target.x < self.x:
                    target.x -= 25
                else:
                    target.x += 25
            self.destroy()
class Icebeam(games.Sprite):
    """Vaporeon's ice-beam projectile: flies horizontally, knocks back."""
    image = games.load_image("icebeam.bmp")
    VELOCITY_FACTOR = 5
    LIFETIME = 40
    health = 10

    def __init__(self, Vaporeon_x, Vaporeon_y):
        """Spawn 100 px in front of Vaporeon in its facing direction.

        (The original re-assigned dead local dx/dy/x values after the
        super() call; they had no effect and were removed.)
        """
        if Vaporeon.Right == True:
            x = Vaporeon_x + 100
            dx = 5
            facing_image = "icebeam.bmp"
        else:
            x = Vaporeon_x - 100
            dx = -5
            # Left-facing art uses a different extension (.png) on disk.
            facing_image = "icebeam.png"
        super(Icebeam, self).__init__(image=Icebeam.image,
                                      x=x, y=Vaporeon_y,
                                      dx=dx, dy=0)
        # Swap in the artwork for the direction of travel.
        self.set_image(games.load_image(facing_image))

    def die(self):
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()

    def die2(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die3(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die4(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def decrease_health(self):
        self.health -= 10
        # NOTE(review): lifetime is only (re)set when the beam takes a hit,
        # and update() never reads it — preserved as-is.
        self.lifetime = Icebeam.LIFETIME

    def update(self):
        """Damage and knock back anything the beam overlaps."""
        #super(Icebeam, self).update()
        if self.health == 0:
            self.die()
        if self.overlapping_sprites:
            for sprite in self.overlapping_sprites:
                sprite.decrease_health()
                # Knock the target away from the point of impact.
                if sprite.x < self.x:
                    sprite.x -= 25
                else:
                    sprite.x += 25
            self.destroy()
class Psychic(games.Sprite):
    """Mew's psychic projectile: faster than the others and hits five
    times per impact."""
    image = games.load_image("psychic.png")
    VELOCITY_FACTOR = 15
    LIFETIME = 40
    health = 10

    def __init__(self, Mew_x, Mew_y):
        """Spawn 100 px in front of Mew, moving in its facing direction."""
        if Mew.Right == True:
            x = Mew_x + 100
            dx = 5
        else:
            x = Mew_x - 100
            dx = -5
        super(Psychic, self).__init__(image=Psychic.image,
                                      x=x, y=Mew_y,
                                      dx=dx, dy=0)

    def die(self):
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()

    def die2(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die3(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die4(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def decrease_health(self):
        self.health -= 10
        # Fix: the original referenced Icebeam.LIFETIME here (same value,
        # but an accidental cross-class dependency).
        self.lifetime = Psychic.LIFETIME

    def update(self):
        """Move, then deal five hits of damage with a heavy knockback."""
        super(Psychic, self).update()
        if self.health == 0:
            self.die()
        if self.overlapping_sprites:
            for sprite in self.overlapping_sprites:
                # Psychic hits five times per impact.
                for _ in range(5):
                    sprite.decrease_health()
                # NOTE(review): knockback is asymmetric (-150 left vs +25
                # right) in the original — preserved; confirm intent.
                if sprite.x < self.x:
                    sprite.x -= 150
                else:
                    sprite.x += 25
            self.destroy()
class Lightning(games.Sprite):
    """Jolteon's lightning-bolt projectile: travels horizontally and
    knocks back whatever it hits."""
    image = games.load_image("lightning.bmp")
    VELOCITY_FACTOR = 5
    LIFETIME = 40
    health = 10

    def __init__(self, Jolteon_x, Jolteon_y):
        """Spawn 100 px in front of Jolteon, moving in its facing direction."""
        if Jolteon.Right == True:
            spawn_x = Jolteon_x + 100
            velocity = 5
        else:
            spawn_x = Jolteon_x - 100
            velocity = -5
        super(Lightning, self).__init__(image=Lightning.image,
                                        x=spawn_x, y=Jolteon_y,
                                        dx=velocity, dy=0)

    def die(self):
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()

    def die2(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die3(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die4(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def decrease_health(self):
        self.health -= 10
        self.lifetime = Lightning.LIFETIME

    def update(self):
        """Damage and knock back anything the bolt overlaps, then vanish."""
        #super(Lightning, self).update()
        if self.health == 0:
            self.die()
        if self.overlapping_sprites:
            for target in self.overlapping_sprites:
                target.decrease_health()
                # Push the target away from the point of impact.
                if target.x < self.x:
                    target.x -= 25
                else:
                    target.x += 25
            self.destroy()
class Fireball2(games.Sprite):
    """Flareon's fireball projectile: flies horizontally, knocks back."""
    image = games.load_image("fireball2.bmp")
    VELOCITY_FACTOR = 5
    LIFETIME = 40
    health = 10

    def __init__(self, Flareon_x, Flareon_y):
        """Spawn 100 px in front of Flareon in its facing direction.

        (The original re-assigned dead local dx/dy/x values after the
        super() call; they had no effect and were removed.)
        """
        if Flareon.Right == True:
            x = Flareon_x + 100
            dx = 5
            facing_image = "fireball2.bmp"
        else:
            x = Flareon_x - 100
            dx = -5
            facing_image = "fireball.bmp"
        super(Fireball2, self).__init__(image=Fireball2.image,
                                        x=x, y=Flareon_y,
                                        dx=dx, dy=0)
        # Swap in the artwork for the direction of travel.
        self.set_image(games.load_image(facing_image))

    def die(self):
        games.screen.add(Explosion(x=self.x, y=self.y))
        self.destroy()

    def die2(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die3(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def die4(self):
        games.screen.add(Explosion(x=self.x, y=self.y))

    def decrease_health(self):
        self.health -= 10
        # NOTE(review): lifetime is only (re)set when the fireball takes a
        # hit, and update() never reads it — preserved as-is.
        self.lifetime = Fireball2.LIFETIME

    def update(self):
        """Damage and knock back anything the fireball overlaps."""
        #super(Fireball2, self).update()
        if self.health == 0:
            self.die()
        if self.overlapping_sprites:
            for sprite in self.overlapping_sprites:
                sprite.decrease_health()
                # Knock the target away from the point of impact.
                if sprite.x < self.x:
                    sprite.x -= 25
                else:
                    sprite.x += 25
            self.destroy()
class Explosion(games.Animation):
    """One-shot explosion animation that plays a boom sound on creation."""
    sound = games.load_sound("boom.wav")
    # Frames explosion1.bmp through explosion9.bmp, played once through.
    images = ["explosion%d.bmp" % frame for frame in range(1, 10)]

    def __init__(self, x, y):
        """Start the animation centered at (x, y) and play the sound."""
        super(Explosion, self).__init__(images=Explosion.images,
                                        x=x, y=y,
                                        repeat_interval=4, n_repeats=1,
                                        is_collideable=False)
        Explosion.sound.play()
class Game(object):
    """Setting up this boss game.

    Creates the player sprite (Eevee or one of its evolutions, depending
    on Eevee.total), the current boss (depending on each boss class's
    `total` progress counter), and a randomly chosen falling evolution
    stone, then runs the main loop.
    """

    def __init__(self):
        """Initialize Game Object: build all sprites for the current state."""
        # Current level (unused so far).
        self.level = 0
        self.sound = games.load_sound("psyduck.wav")
        # Create Eevee (the player).
        self.Eevee = Eevee(x=100,
                           y=400)
        games.screen.add(self.Eevee)
        # Create Psyduck, the first boss.
        self.Psyduck = Psyduck(game=self,
                               x=600,
                               y=400)
        games.screen.add(self.Psyduck)
        self.sound.play()
        # Each boss appears once every earlier boss's progress counter is 0.
        # Fix: the original added self.Cubone / self.Charizard / self.Snorlax
        # / self.Mewtwo, which do not exist (the instances were stored in
        # lowercase attributes) and would raise AttributeError.
        if Psyduck.total == 0:
            self.cubone = Cubone(game=self,
                                 x=600,
                                 y=400)
            games.screen.add(self.cubone)
        if Psyduck.total == 0 and Cubone.total == 0:
            self.charizard = Charizard(game=self,
                                       x=600,
                                       y=400)
            games.screen.add(self.charizard)
        if Psyduck.total == 0 and Cubone.total == 0 and Charizard.total == 0:
            self.snorlax = Snorlax(game=self,
                                   x=600,
                                   y=400)
            games.screen.add(self.snorlax)
        # Fix: the original tested the truthiness of Snorlax.total, the
        # opposite of the "not yet beaten" (== 0) test used above.
        if (Psyduck.total == 0 and Cubone.total == 0
                and Charizard.total == 0 and Snorlax.total == 0):
            self.mewtwo = Mewtwo(game=self,
                                 x=600,
                                 y=400)
            games.screen.add(self.mewtwo)
        # Player evolutions, selected by Eevee.total.
        if Eevee.total == 1:
            self.vaporeon = Vaporeon(x=200,
                                     y=400)
            games.screen.add(self.vaporeon)
        if Eevee.total == 2:
            self.jolteon = Jolteon(x=200,
                                   y=400)
            games.screen.add(self.jolteon)
        if Eevee.total == 3:
            self.flareon = Flareon(x=200,
                                   y=400)
            games.screen.add(self.flareon)
        if Eevee.total == 4:
            self.mew = Mew(x=200,
                           y=400)
            # Fix: the original created Mew but never added it to the screen.
            games.screen.add(self.mew)
        # Randomize which evolution stone will drop.
        # Fix: the original assigned the random number to Psyduck.total,
        # but the checks below read Psyduck.stone.
        if Psyduck.stone == 0:
            Psyduck.stone = random.randint(1, 3)
        if Psyduck.stone == 1:
            self.waterstone = Water_Stone(game=self,
                                          x=400,
                                          y=2)
            games.screen.add(self.waterstone)
        if Psyduck.stone == 2:
            self.thunderstone = Thunder_Stone(game=self,
                                              x=300,
                                              y=2)
            games.screen.add(self.thunderstone)
        if Psyduck.stone == 3:
            self.firestone = Fire_Stone(game=self,
                                        x=200,
                                        y=2)
            games.screen.add(self.firestone)

    def play(self):
        """Start the music, set the background and enter the main loop."""
        games.music.load("clocks.mid")
        games.music.play(-100)  # negative count: loop indefinitely
        armory_image = games.load_image("armory.jpg")
        games.screen.background = armory_image
        games.screen.mainloop()

    def end(self):
        """End the game: show 'Game Over' for 5 seconds, then quit."""
        end_message = games.Message(value="Game Over",
                                    size=90,
                                    color=color.red,
                                    x=games.screen.width / 2,
                                    y=games.screen.height / 2,
                                    lifetime=5 * games.screen.fps,
                                    after_death=games.screen.quit,
                                    is_collideable=False)
        games.screen.add(end_message)

    def win(self):
        """Win the game: show 'You Win!' for 5 seconds, then quit."""
        end_message = games.Message(value="You Win!",
                                    size=90,
                                    color=color.blue,
                                    x=games.screen.width / 2,
                                    y=games.screen.height / 2,
                                    lifetime=5 * games.screen.fps,
                                    after_death=games.screen.quit,
                                    is_collideable=False)
        games.screen.add(end_message)
def main():
    """Create the game and run it."""
    pokekombat = Game()
    pokekombat.play()


# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| 30.747949
| 130
| 0.516506
| 7,928
| 67,461
| 4.278759
| 0.039859
| 0.071989
| 0.063558
| 0.060138
| 0.841047
| 0.816432
| 0.792907
| 0.756176
| 0.737928
| 0.725989
| 0
| 0.028643
| 0.380531
| 67,461
| 2,193
| 131
| 30.76197
| 0.783082
| 0.102029
| 0
| 0.781017
| 0
| 0
| 0.019055
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.100496
| false
| 0
| 0.001241
| 0
| 0.207196
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d3178120713fa7421d191ef97270d9d0729b6662
| 35,673
|
py
|
Python
|
srv6_utils.py
|
everywan-io/srv6-sdn-mininet
|
aa99d9b277c3823489d07bfc620fe08dd6fa30e7
|
[
"Apache-2.0"
] | null | null | null |
srv6_utils.py
|
everywan-io/srv6-sdn-mininet
|
aa99d9b277c3823489d07bfc620fe08dd6fa30e7
|
[
"Apache-2.0"
] | null | null | null |
srv6_utils.py
|
everywan-io/srv6-sdn-mininet
|
aa99d9b277c3823489d07bfc620fe08dd6fa30e7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
##############################################################################################
# Copyright (C) 2018 Pier Luigi Ventre - (CNIT and University of Rome "Tor Vergata")
# Copyright (C) 2018 Stefano Salsano - (CNIT and University of Rome "Tor Vergata")
# www.uniroma2.it/netgroup - www.cnit.it
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utils for Segment Routing IPv6
#
# @author Pier Luigi Ventre <pierventre@hotmail.com>
# @author Stefano Salsano <stefano.salsano@uniroma2.it>
# General imports
from __future__ import absolute_import, division, print_function
import os
import shutil
import sys
import random
# Mininet dependencies
from mininet.node import Host
# SRv6 dependencies
from srv6_generators import RANGE_FOR_AREA_0
################## Setup these variables ##################
# Interval between two hello packets (in seconds)
HELLO_INTERVAL = 1
# How long we should wait for hello packets
# before we declare the neighbor dead (in seconds)
DEAD_INTERVAL = 3
# How long we should wait before retransmitting
# Database Description and Link State Request packets (in seconds)
RETRANSMIT_INTERVAL = 3
# The maximum time allowed between sending unsolicited
# multicast router advertisement from the interface (in seconds)
RA_INTERVAL = 10
###########################################################
# This workaround solves the issue of python commands
# executed outside the virtual environment
PYTHON_PATH = sys.executable
# Filenames of the bash scripts generated in each node's scratch dir
#
# nodes.sh file containing the nodes
NODES_SH = 'nodes.sh'
# neighs.sh containing the neighbors
NEIGHS_SH = 'neighs.sh'
# devid.sh file containing the device ID
DEVICEID_SH = 'devid.sh'
# hostname.sh file containing the hostname
HOSTNAME_SH = 'hostname.sh'
# interfaces.sh file containing the interfaces
INTERFACES_SH = 'interfaces.sh'
# ips.sh file containing the ips
IPS_SH = 'ips.sh'
# Initialize random seed
# NOTE: seeding with a fixed value makes the UUIDs produced by
# generate_uuid() reproducible across emulation runs
random.seed(0)
# Generate a random UUID used to identify the node
# Generate a random UUID used to identify the node
def generate_uuid():
    """Return a random UUID-like identifier for a node.

    The identifier has the canonical 8-4-4-4-12 layout, e.g.
    ``7a0525c1-22e9-cc50-d44d-5149c7524f1f``, with every character drawn
    from the hex-like alphabet ``abcdef1234567890`` via ``random.choice``.
    The module seeds ``random`` with a fixed value, so the sequence of
    UUIDs is reproducible across runs.

    Returns:
        str: a 36-character dash-separated identifier.
    """
    # Note: the original implementation declared ``global seed_initiated``
    # for a name that is never defined or used; that dead statement has
    # been removed.
    seq = 'abcdef1234567890'
    # One random.choice call per character, in the same order as before,
    # so the output for a given seed is unchanged.
    blocks = [''.join(random.choice(seq) for _ in range(length))
              for length in (8, 4, 4, 4, 12)]
    # Join the five blocks with dashes to form the UUID
    return '-'.join(blocks)
# Abstraction to model a SRv6Router
class SRv6Router(Host):
    """Mininet host that emulates an SRv6-capable router.

    On configuration it flushes the interfaces, writes a set of helper
    bash scripts (hostname, device id, neighbors, interfaces, nodes, ips)
    into a per-node scratch directory under ``/tmp/<name>``, enables the
    IPv4/IPv6 forwarding and SRv6 sysctls, and optionally starts the FRR
    daemons (zebra, staticd and ospfd/ospf6d) for IPv4 or IPv6 emulation.

    Each entry of ``self.nets`` is a dict with at least the keys
    ``intf``, ``ip`` and ``net``; the FRR helpers additionally read
    ``bw``, ``cost``, ``stub`` and ``is_private`` where present.
    """

    def __init__(self, name, *args, **kwargs):
        # Private /var/mininet gives each node its own hostname file
        dirs = ['/var/mininet']
        Host.__init__(self, name, privateDirs=dirs, *args, **kwargs)
        # Per-node scratch directory for configs, logs and helper scripts
        self.dir = "/tmp/%s" % name
        self.nets = []
        # Always start from an empty scratch directory
        if os.path.exists(self.dir):
            shutil.rmtree(self.dir)
        if not os.path.exists(self.dir):
            os.makedirs(self.dir)
        # In debug mode every command executed on the node is echoed
        if kwargs.get('debug', False):
            self.exec_cmd = self.cmdPrint
        else:
            self.exec_cmd = self.cmd
        # Kill any stale screen session left over from a previous run
        self.exec_cmd("for session in $(screen -ls | grep -o '[0-9]*\.%s'); do screen -S ${session} -X quit; done" % self.name)

    # Config hook
    def config(self, **kwargs):
        """Configure the router node.

        Recognized kwargs (all optional): ``neighs``, ``interfaces``,
        ``nets``, ``nodes``, ``sshd``, ``loopbackip``, ``debug``,
        ``use_ipv4_addressing``, ``enable_ospf``, ``scripts`` plus the
        FRR-related ones consumed by the ``start_*`` helpers.
        """
        # Init steps
        Host.config(self, **kwargs)
        # Iterate over the interfaces
        for intf in self.intfs.values():
            # Remove any configured address
            self.exec_cmd('ifconfig %s 0' % intf.name)
        # Let's write the hostname in /var/mininet/hostname
        self.exec_cmd("echo '" + self.name + "' > /var/mininet/hostname")
        # Let's write the hostname
        self.exec_cmd("echo 'HOSTNAME=%s' > %s/%s" %
                      (self.name, self.dir, HOSTNAME_SH))
        # Let's write the id
        uuid = generate_uuid()
        self.exec_cmd("echo 'DEVICEID=%s' > %s/%s" %
                      (uuid, self.dir, DEVICEID_SH))
        # Let's write the neighbors
        if kwargs.get('neighs', None) is not None:
            neighs_sh = '%s/%s' % (self.dir, NEIGHS_SH)
            with open(neighs_sh, 'w') as outfile:
                # Create header
                neighs = "declare -a NEIGHS=("
                # Iterate over neighbor ips
                for neigh in kwargs['neighs']:
                    # Add the neighs one by one
                    neighs = neighs + "%s " % neigh
                if kwargs['neighs'] != []:
                    # Eliminate last character (trailing space)
                    neighs = neighs[:-1] + ")\n"
                else:
                    neighs = neighs + ")\n"
                # Write on the file
                outfile.write(neighs)
        # Let's write the interfaces
        if kwargs.get('interfaces', None) is not None:
            interfaces_sh = '%s/%s' % (self.dir, INTERFACES_SH)
            with open(interfaces_sh, 'w') as outfile:
                # Create header
                interfaces = "declare -A INTERFACES=("
                # Iterate over interfaces
                for (neigh, intf) in kwargs['interfaces']:
                    # Add the interfaces one by one
                    interfaces = interfaces + '[%s]=%s ' % (neigh, intf)
                if kwargs['interfaces'] != []:
                    # Eliminate last character (trailing space)
                    interfaces = interfaces[:-1] + ")\n"
                else:
                    interfaces = interfaces + ")\n"
                # Write on the file
                outfile.write(interfaces)
        # Retrieve nets
        self.nets = list()
        if kwargs.get('nets', None) is not None:
            self.nets = kwargs['nets']
        # If requested
        if kwargs.get('sshd', False):
            # Let's start sshd daemon in the hosts
            self.exec_cmd('/usr/sbin/sshd -D &')
        # Configure the loopback address
        if kwargs.get('loopbackip', None) is not None:
            self.exec_cmd('ip a a %s dev lo' % (kwargs['loopbackip']))
            self.nets.append({
                'intf': 'lo',
                'ip': kwargs['loopbackip'],
                'net': kwargs['loopbackip']})
        # Enable IPv6 forwarding
        self.exec_cmd("sysctl -w net.ipv6.conf.all.forwarding=1")
        # Enable IPv4 forwarding
        self.exec_cmd("sysctl -w net.ipv4.conf.all.forwarding=1")
        # Disable Reverse Path Forwarding filter
        self.exec_cmd("sysctl -w net.ipv4.conf.all.rp_filter=0")
        # Enable SRv6 on the interface
        self.exec_cmd("sysctl -w net.ipv6.conf.all.seg6_enabled=1")
        # Disable RA accept (stateless address autoconfiguration)
        self.exec_cmd("sysctl -w net.ipv6.conf.all.accept_ra=0")
        # Force Linux to keep all IPv6 addresses on an interface down event
        self.exec_cmd("sysctl -w net.ipv6.conf.all.keep_addr_on_down=1")
        # Iterate over the interfaces and repeat the settings per interface
        for intf in self.intfs.values():
            # Enable IPv6 forwarding
            self.exec_cmd(
                "sysctl -w net.ipv6.conf.%s.forwarding=1" % intf.name)
            # Enable IPv4 forwarding
            self.exec_cmd(
                "sysctl -w net.ipv4.conf.%s.forwarding=1" % intf.name)
            # Disable Reverse Path Forwarding filter
            self.exec_cmd("sysctl -w net.ipv4.conf.%s.rp_filter=0" % intf.name)
            # Enable SRv6 on the interface
            self.exec_cmd(
                "sysctl -w net.ipv6.conf.%s.seg6_enabled=1" % intf.name)
            # Disable RA accept (stateless address autoconfiguration)
            self.exec_cmd("sysctl -w net.ipv6.conf.%s.accept_ra=0" % intf.name)
            # Force Linux to keep all IPv6 addresses on an interface down event
            self.exec_cmd(
                "sysctl -w net.ipv6.conf.%s.keep_addr_on_down=1" % intf.name)
        # Zebra and Quagga config
        if len(self.nets) > 0:
            if kwargs.get('use_ipv4_addressing', False):
                self.start_zebra_ipv4(**kwargs)
                if kwargs.get('enable_ospf', False):
                    self.start_ospfd(**kwargs)
                self.start_staticd_ipv4(**kwargs)
            else:
                self.start_zebra_ipv6(**kwargs)
                if kwargs.get('enable_ospf', False):
                    self.start_ospf6d(**kwargs)
                self.start_staticd_ipv6(**kwargs)
        # Let's write the nodes
        if kwargs.get('nodes', None) is not None:
            nodes_sh = '%s/%s' % (self.dir, NODES_SH)
            with open(nodes_sh, 'w') as outfile:
                # Create header
                nodes = "declare -A NODES=("
                # Iterate over nodes
                for node, ip in kwargs['nodes'].items():
                    # Add the nodes one by one
                    nodes = nodes + '[%s]=%s ' % (node, ip)
                if kwargs['nodes'] != []:
                    # Eliminate last character (trailing space)
                    nodes = nodes[:-1] + ")\n"
                else:
                    nodes = nodes + ")\n"
                # Write on the file
                outfile.write(nodes)
        # Let's write the ips
        ips_sh = '%s/%s' % (self.dir, IPS_SH)
        with open(ips_sh, 'w') as outfile:
            # Create header
            ips = "declare -A IPS=("
            # Iterate over ips
            for net in self.nets:
                # Add the ips one by one (strip the prefix length)
                ip = net['ip'].split('/')[0]
                ips = ips + '[%s]=%s ' % (net['intf'], ip)
            if self.nets != []:
                # Eliminate last character (trailing space)
                ips = ips[:-1] + ")\n"
            else:
                ips = ips + ")\n"
            # Write on the file
            outfile.write(ips)
        # Add python path to PATH environment variable
        # This solves the issue of python commands executed
        # outside the virtual environment
        self.exec_cmd('export PATH=%s:$PATH' % os.path.dirname(PYTHON_PATH))
        self.exec_cmd('export SCREENDIR=/run/screen/S-%s' % self.name)
        # Run scripts
        scripts = ''
        for script in kwargs.get('scripts', []):
            # Change directory to the host dir
            self.exec_cmd('cd %s' % self.dir)
            # Get full path (assumes scripts live in ./scripts relative
            # to the launcher's cwd -- TODO confirm against callers)
            script_path = os.path.abspath(os.path.join('scripts', script))
            # Append the script to the scripts
            scripts += script_path + ' & '
        if scripts != '':
            # This line forces screen to keep opened
            # after the scripts termination
            scripts = scripts[:-3] + '; exec bash'
            # Execute the scripts
            self.exec_cmd("screen -dmS %s bash -c '%s'" % (self.name, scripts))

    # Configure and start zebra for IPv6 emulation
    def start_zebra_ipv6(self, **kwargs):
        """Write zebra.conf for IPv6 (addresses + RA) and start zebra."""
        # Zebra and FRR config
        if len(self.nets) > 0:
            zebra = open("%s/zebra.conf" % self.dir, 'w')
            zebra.write("! -*- zebra -*-\n!\nhostname %s\n" %
                        self.name)
            zebra.write("password srv6\nenable password srv6\n"
                        "log file %s/zebra.log\n!\n" % self.dir)
            # Iterate over the nets and build interface part of the configs
            for net in self.nets:
                # Non-loopback interface
                if net['intf'] != 'lo':
                    # Set the IPv6 address and the network
                    # discovery prefix in the zebra configuration
                    zebra.write("interface %s\n"
                                " link-detect\n"
                                " bandwidth %s\n"
                                " no ipv6 nd suppress-ra\n"
                                " ipv6 nd ra-interval %s\n"
                                " ipv6 address %s\n"
                                " ipv6 nd prefix %s\n!\n"
                                % (net['intf'], min(net['bw']*1000, 100000),
                                   RA_INTERVAL, net['ip'], net['net']))
            zebra.close()
            # Right permission and owners
            self.exec_cmd("chown frr /var/run")
            self.exec_cmd("chown frr %s/*.conf" % self.dir)
            self.exec_cmd("chown frr %s/." % self.dir)
            self.exec_cmd("chmod 640 %s/*.conf" % self.dir)
            # Start daemons
            self.exec_cmd("zebra -f %s/zebra.conf -d -z %s/zebra.sock -i "
                          "%s/zebra.pid" % (self.dir, self.dir, self.dir))

    def start_staticd_ipv4(self, **kwargs):
        """Write staticd.conf with the IPv4 default via and static routes,
        then start staticd against this node's zebra socket."""
        staticd = open("%s/staticd.conf" % self.dir, 'w')
        staticd.write("! -*- staticd -*-\n!\nhostname %s\n" % self.name)
        staticd.write("password srv6\nlog file %s/staticd.log\n!\n" %
                      self.dir)
        # Configure the default via
        default_via = kwargs.get('default_via', None)
        if default_via is not None:
            staticd.write("ip route %s %s\n" % ('0.0.0.0/0', default_via))
        # Configure the routes
        if kwargs.get('routes', None):
            for route in kwargs['routes']:
                dest = route['dest']
                via = route['via']
                staticd.write("ip route %s %s\n" % (dest, via))
        staticd.close()
        # Right permission and owners
        self.exec_cmd("chown frr /var/run")
        self.exec_cmd("chown frr %s/*.conf" % self.dir)
        self.exec_cmd("chown frr %s/." % self.dir)
        self.exec_cmd("chmod 640 %s/*.conf" % self.dir)
        # Start daemons
        self.exec_cmd("staticd -f %s/staticd.conf -d -z %s/zebra.sock -i "
                      "%s/staticd.pid" % (self.dir, self.dir, self.dir))

    def start_staticd_ipv6(self, **kwargs):
        """Write staticd.conf with the IPv6 default via, the router
        network and static routes, then start staticd."""
        staticd = open("%s/staticd.conf" % self.dir, 'w')
        staticd.write("! -*- staticd -*-\n!\nhostname %s\n" % self.name)
        staticd.write("password srv6\nlog file %s/staticd.log\n!\n" %
                      self.dir)
        # Configure the default via
        default_via = kwargs.get('default_via', None)
        if default_via is not None:
            staticd.write("ipv6 route %s %s\n" % ('::/0', default_via))
        # Add static route for router network
        if kwargs.get('routernet', None):
            routernet = kwargs['routernet']
            staticd.write("ipv6 route %s lo\n!\n" % routernet)
        # Configure the routes
        if kwargs.get('routes', None):
            for route in kwargs['routes']:
                dest = route['dest']
                via = route['via']
                staticd.write("ipv6 route %s %s\n" % (dest, via))
        staticd.close()
        # Right permission and owners
        self.exec_cmd("chown frr /var/run")
        self.exec_cmd("chown frr %s/*.conf" % self.dir)
        self.exec_cmd("chown frr %s/." % self.dir)
        self.exec_cmd("chmod 640 %s/*.conf" % self.dir)
        # Start zebra daemon
        self.exec_cmd("staticd -f %s/staticd.conf -d -z %s/zebra.sock -i "
                      "%s/staticd.pid" % (self.dir, self.dir, self.dir))

    # Configure and start ospf6d for IPv6 emulation
    def start_ospf6d(self, **kwargs):
        """Write ospf6d.conf (per-interface timers/cost, passive on stub
        links, area 0 membership) and start ospf6d."""
        # Zebra and Quagga config
        if len(self.nets) > 0:
            ospfd = open("%s/ospf6d.conf" % self.dir, 'w')
            ospfd.write("! -*- ospf6 -*-\n!\nhostname %s\n" % self.name)
            ospfd.write("password srv6\n"
                        "log file %s/ospf6d.log\n!\n" %
                        self.dir)
            # Iterate over the nets and build interface part of the configs
            for net in self.nets:
                # Link cost for the interface
                cost = net.get('cost', None)
                # Non-loopback interface
                if net['intf'] != 'lo':
                    if net['stub']:
                        # Stub network
                        # Set OSPF6 parameters and mark the network as
                        # passive in order to advertise the interface as
                        # a stub link
                        if cost is not None:
                            ospfd.write("interface %s\n"
                                        " ipv6 ospf6 passive\n"
                                        " ipv6 ospf6 cost %s\n"
                                        " ipv6 ospf6 hello-interval %s\n"
                                        " ipv6 ospf6 dead-interval %s\n"
                                        " ipv6 ospf6 retransmit-interval %s\n"
                                        "!\n"
                                        % (net['intf'], cost, HELLO_INTERVAL,
                                           DEAD_INTERVAL, RETRANSMIT_INTERVAL))
                        else:
                            ospfd.write("interface %s\n"
                                        " ipv6 ospf6 passive\n"
                                        " ipv6 ospf6 hello-interval %s\n"
                                        " ipv6 ospf6 dead-interval %s\n"
                                        " ipv6 ospf6 retransmit-interval %s\n"
                                        "!\n"
                                        % (net['intf'], HELLO_INTERVAL,
                                           DEAD_INTERVAL, RETRANSMIT_INTERVAL))
                    else:
                        # Transit network
                        if cost is not None:
                            ospfd.write("interface %s\n"
                                        " no ipv6 ospf6 passive\n"
                                        " ipv6 ospf6 cost %s\n"
                                        " ipv6 ospf6 hello-interval %s\n"
                                        " ipv6 ospf6 dead-interval %s\n"
                                        " ipv6 ospf6 retransmit-interval %s\n"
                                        "!\n"
                                        % (net['intf'], cost, HELLO_INTERVAL,
                                           DEAD_INTERVAL, RETRANSMIT_INTERVAL))
                        else:
                            ospfd.write("interface %s\n"
                                        " no ipv6 ospf6 passive\n"
                                        " ipv6 ospf6 hello-interval %s\n"
                                        " ipv6 ospf6 dead-interval %s\n"
                                        " ipv6 ospf6 retransmit-interval %s\n"
                                        "!\n"
                                        % (net['intf'], HELLO_INTERVAL,
                                           DEAD_INTERVAL, RETRANSMIT_INTERVAL))
            # Finishing ospf6d conf
            if kwargs.get('routerid', None):
                routerid = kwargs['routerid']
                ospfd.write("router ospf6\n"
                            " ospf6 router-id %s\n"
                            " redistribute static\n!\n" % routerid)
                ospfd.write(" area 0.0.0.0 range %s\n" % RANGE_FOR_AREA_0)
                # Iterate again over the nets to finish area part
                for net in self.nets:
                    if net.get('is_private', False):
                        ospfd.write(" no interface %s area 0.0.0.0\n" %
                                    (net['intf']))
                    else:
                        ospfd.write(" interface %s area 0.0.0.0\n" % (net['intf']))
                ospfd.write("!\n")
            ospfd.close()
            # Right permission and owners
            self.exec_cmd("chown frr /var/run")
            self.exec_cmd("chown frr %s/*.conf" % self.dir)
            self.exec_cmd("chown frr %s/." % self.dir)
            self.exec_cmd("chmod 640 %s/*.conf" % self.dir)
            # Start ospf6d daemon
            self.exec_cmd("ospf6d -f %s/ospf6d.conf -d -z %s/zebra.sock -i "
                          "%s/ospf6d.pid" % (self.dir, self.dir, self.dir))

    # Configure and start zebra for IPv4 emulation
    def start_zebra_ipv4(self, **kwargs):
        """Write zebra.conf for IPv4 (interface addresses) and start zebra."""
        # Zebra and Quagga config
        if len(self.nets) > 0:
            zebra = open("%s/zebra.conf" % self.dir, 'w')
            zebra.write("! -*- zebra -*-\n!\nhostname %s\n" %
                        self.name)
            zebra.write("password srv6\nenable password srv6\n"
                        "log file %s/zebra.log\n!\n" % self.dir)
            # Iterate over the nets and build interface part of the configs
            for net in self.nets:
                # Non-loopback interface
                if net['intf'] != 'lo':
                    # Set the IPv4 address in the zebra configuration
                    zebra.write("interface %s\n"
                                " link-detect\n"
                                " bandwidth %s\n"
                                " ip address %s\n!\n"
                                % (net['intf'], min(net['bw']*1000, 100000),
                                   net['ip']))
            zebra.close()
            # Right permission and owners
            self.exec_cmd("chown frr /var/run")
            self.exec_cmd("chown frr %s/*.conf" % self.dir)
            self.exec_cmd("chown frr %s/." % self.dir)
            self.exec_cmd("chmod 640 %s/*.conf" % self.dir)
            # Start zebra daemon
            self.exec_cmd("zebra -f %s/zebra.conf -d -z %s/zebra.sock -i "
                          "%s/zebra.pid" % (self.dir, self.dir, self.dir))

    # Configure and start ospfd for IPv4 emulation
    def start_ospfd(self, **kwargs):
        """Write ospfd.conf (per-interface timers/cost, area 0 membership,
        passive-interface for stub links) and start ospfd."""
        # Zebra and Quagga config
        if len(self.nets) > 0:
            ospfd = open("%s/ospfd.conf" % self.dir, 'w')
            ospfd.write("! -*- ospf -*-\n!\nhostname %s\n" % self.name)
            ospfd.write("password srv6\nlog file %s/ospfd.log\n!\n" %
                        self.dir)
            # Iterate over the nets and build interface part of the configs
            for net in self.nets:
                # Link cost for the interface
                cost = net.get('cost', None)
                # Non-loopback interface
                if net['intf'] != 'lo':
                    # Check if the interface is private
                    enable_ospf = 'no ' if net.get('is_private', False) else ''
                    if net['stub']:
                        # Stub network
                        # Set OSPF6 parameters and mark the network as
                        # passive in order to advertise the interface as
                        # a stub link
                        if cost is not None:
                            ospfd.write("interface %s\n"
                                        " %sip ospf area 0.0.0.0\n"
                                        " ip ospf cost %s\n"
                                        " ip ospf hello-interval %s\n"
                                        " ip ospf dead-interval %s\n"
                                        " ip ospf retransmit-interval %s\n"
                                        "!\n"
                                        % (net['intf'], enable_ospf, cost,
                                           HELLO_INTERVAL, DEAD_INTERVAL,
                                           RETRANSMIT_INTERVAL))
                        else:
                            ospfd.write("interface %s\n"
                                        " %sip ospf area 0.0.0.0\n"
                                        " ip ospf hello-interval %s\n"
                                        " ip ospf dead-interval %s\n"
                                        " ip ospf retransmit-interval %s\n"
                                        "!\n"
                                        % (net['intf'], enable_ospf,
                                           HELLO_INTERVAL, DEAD_INTERVAL,
                                           RETRANSMIT_INTERVAL))
                    else:
                        # Transit network
                        if cost is not None:
                            ospfd.write("interface %s\n"
                                        " %sip ospf area 0.0.0.0\n"
                                        " ip ospf cost %s\n"
                                        " ip ospf hello-interval %s\n"
                                        " ip ospf dead-interval %s\n"
                                        " ip ospf retransmit-interval %s\n"
                                        "!\n"
                                        % (net['intf'], enable_ospf, cost,
                                           HELLO_INTERVAL, DEAD_INTERVAL,
                                           RETRANSMIT_INTERVAL))
                        else:
                            ospfd.write("interface %s\n"
                                        " %sip ospf area 0.0.0.0\n"
                                        " ip ospf hello-interval %s\n"
                                        " ip ospf dead-interval %s\n"
                                        " ip ospf retransmit-interval %s\n"
                                        "!\n"
                                        % (net['intf'], enable_ospf,
                                           HELLO_INTERVAL, DEAD_INTERVAL,
                                           RETRANSMIT_INTERVAL))
            # Finishing ospfd conf
            if kwargs.get('routerid', None):
                routerid = kwargs['routerid']
                ospfd.write("router ospf\n"
                            " ospf router-id %s\n"
                            " redistribute static\n!\n" % routerid)
                for net in self.nets:
                    if net.get('stub', False):
                        ospfd.write(" passive-interface %s\n!\n" % net['intf'])
                    else:
                        ospfd.write(" no passive-interface %s\n!\n" % net['intf'])
                ospfd.write("!\n")
            ospfd.close()
            # Right permission and owners
            self.exec_cmd("chown frr /var/run")
            self.exec_cmd("chown frr %s/*.conf" % self.dir)
            self.exec_cmd("chown frr %s/." % self.dir)
            self.exec_cmd("chmod 640 %s/*.conf" % self.dir)
            # Start ospfd daemon
            self.exec_cmd("ospfd -f %s/ospfd.conf -d -z %s/zebra.sock -i "
                          "%s/ospfd.pid" % (self.dir, self.dir, self.dir))

    # Terminate node
    def terminate(self):
        """Stop the node's screen sessions and terminate the Mininet host."""
        # Stop screen session
        #self.exec_cmd('screen -XS %s quit' % self.name)
        self.exec_cmd("for session in $(screen -ls | grep -o '[0-9]*\.%s'); do screen -S ${session} -X quit; done" % self.name)
        Host.terminate(self)

    # Clean up the environment
    def cleanup(self):
        """Clean up the Mininet host and remove the scratch directory."""
        Host.cleanup(self)
        # Rm dir
        if os.path.exists(self.dir):
            shutil.rmtree(self.dir)
# Abstraction to model a MHost
# Abstraction to model a MHost
class MHost(Host):
    """Mininet end host with a per-node scratch directory and helper
    bash scripts.

    Unlike :class:`SRv6Router`, this host accepts router advertisements
    (``accept_ra=1``) instead of running routing daemons, and assigns its
    addresses directly with ``ip a a``.

    Bug fix vs the original: ``ips.sh`` used to be written *before*
    ``self.nets`` was retrieved from kwargs, so it always dumped the
    empty list initialized in ``__init__``. It is now written after the
    nets have been retrieved and configured, matching
    ``SRv6Router.config``.
    """

    def __init__(self, name, *args, **kwargs):
        # Private /var/mininet gives each node its own hostname file
        dirs = ['/var/mininet']
        Host.__init__(self, name, privateDirs=dirs, *args, **kwargs)
        # Per-node scratch directory for scripts and generated files
        self.dir = "/tmp/%s" % name
        self.nets = []
        # Always start from an empty scratch directory
        if os.path.exists(self.dir):
            shutil.rmtree(self.dir)
        if not os.path.exists(self.dir):
            os.makedirs(self.dir)
        # In debug mode every command executed on the node is echoed
        if kwargs.get('debug', False):
            self.exec_cmd = self.cmdPrint
        else:
            self.exec_cmd = self.cmd
        # Kill any stale screen session left over from a previous run
        self.exec_cmd("for session in $(screen -ls | grep -o '[0-9]*\.%s'); do screen -S ${session} -X quit; done" % self.name)

    # Config hook
    def config(self, **kwargs):
        """Configure the host.

        Recognized kwargs (all optional): ``neighs``, ``interfaces``,
        ``nets``, ``nodes``, ``sshd``, ``loopbackip``, ``default_via``,
        ``routes``, ``scripts``, ``debug``.
        """
        # Init steps
        Host.config(self, **kwargs)
        # Iterate over the interfaces
        for intf in self.intfs.values():
            # Remove any configured address
            self.exec_cmd('ip a flush dev %s scope global' % intf.name)
        # Let's write the hostname in /var/mininet/hostname
        self.exec_cmd("echo '" + self.name + "' > /var/mininet/hostname")
        # Let's write the hostname
        self.exec_cmd("echo 'HOSTNAME=%s' > %s/%s" %
                      (self.name, self.dir, HOSTNAME_SH))
        # Let's write the id
        uuid = generate_uuid()
        self.exec_cmd("echo 'DEVICEID=%s' > %s/%s" %
                      (uuid, self.dir, DEVICEID_SH))
        # Let's write the neighbors
        if kwargs.get('neighs', None) is not None:
            neighs_sh = '%s/%s' % (self.dir, NEIGHS_SH)
            with open(neighs_sh, 'w') as outfile:
                # Create header
                neighs = "declare -a NEIGHS=("
                # Iterate over neighbors
                for neigh in kwargs['neighs']:
                    # Add the neighs one by one
                    neighs = neighs + "%s " % neigh
                if kwargs['neighs'] != []:
                    # Eliminate last character (trailing space)
                    neighs = neighs[:-1] + ")\n"
                else:
                    neighs = neighs + ")\n"
                # Write on the file
                outfile.write(neighs)
        # Let's write the interfaces
        if kwargs.get('interfaces', None) is not None:
            interfaces_sh = '%s/%s' % (self.dir, INTERFACES_SH)
            with open(interfaces_sh, 'w') as outfile:
                # Create header
                interfaces = "declare -A INTERFACES=("
                # Iterate over interfaces
                for (neigh, intf) in kwargs['interfaces']:
                    # Add the interfaces one by one
                    interfaces = interfaces + '[%s]=%s ' % (neigh, intf)
                if kwargs['interfaces'] != []:
                    # Eliminate last character (trailing space)
                    interfaces = interfaces[:-1] + ")\n"
                else:
                    interfaces = interfaces + ")\n"
                # Write on the file
                outfile.write(interfaces)
        # Retrieve nets
        self.nets = list()
        if kwargs.get('nets', None) is not None:
            self.nets = kwargs['nets']
        # If requested
        if kwargs.get('sshd', False):
            # Let's start sshd daemon in the hosts
            self.exec_cmd('/usr/sbin/sshd -D &')
        # Configure the loopback address
        if kwargs.get('loopbackip', None) is not None:
            self.exec_cmd('ip a a %s dev lo' % (kwargs['loopbackip']))
            self.nets.append({
                'intf': 'lo',
                'ip': kwargs['loopbackip'],
                'net': kwargs['loopbackip']})
        # Disable IPv6 address autoconfiguration
        self.exec_cmd('sysctl -w net.ipv6.conf.all.autoconf=0')
        # Enable RA accept (stateless address autoconfiguration)
        self.exec_cmd('sysctl -w net.ipv6.conf.all.accept_ra=1')
        # Force Linux to keep all IPv6 addresses on an interface down event
        self.exec_cmd("sysctl -w net.ipv6.conf.all.keep_addr_on_down=1")
        # Iterate over the interfaces
        for intf in self.intfs.values():
            # Disable IPv6 address autoconfiguration on the interface
            # The addresses are configured by this script
            self.exec_cmd("sysctl -w net.ipv6.conf.%s.autoconf=0" % intf.name)
            # Accept Router Advertisements messages
            # Used to set a default via in the routing tables
            self.exec_cmd("sysctl -w net.ipv6.conf.%s.accept_ra=1" % intf.name)
            # Force Linux to keep all IPv6 addresses on an interface down event
            self.exec_cmd("sysctl -w net.ipv6.conf.%s.keep_addr_on_down=1"
                          % intf.name)
        for net in self.nets:
            # Set the address
            self.exec_cmd('ip a a %s dev %s' % (net['ip'], net['intf']))
        # Configure the default via
        default_via = kwargs.get('default_via', None)
        if default_via is not None:
            self.exec_cmd('ip r d default')
            self.exec_cmd('ip -6 r d default')
            self.exec_cmd('ip r a default via %s' % default_via)
        # Configure the routes
        for route in kwargs.get('routes', []):
            dest = route['dest']
            via = route['via']
            self.exec_cmd("ip route add %s via %s\n" % (dest, via))
        # Let's write the nodes
        if kwargs.get('nodes', None) is not None:
            nodes_sh = '%s/%s' % (self.dir, NODES_SH)
            with open(nodes_sh, 'w') as outfile:
                # Create header
                nodes = "declare -A NODES=("
                # Iterate over nodes
                for node, ip in kwargs['nodes'].items():
                    # Add the nodes one by one
                    nodes = nodes + '[%s]=%s ' % (node, ip)
                if kwargs['nodes'] != []:
                    # Eliminate last character (trailing space)
                    nodes = nodes[:-1] + ")\n"
                else:
                    nodes = nodes + ")\n"
                # Write on the file
                outfile.write(nodes)
        # Let's write the ips
        # (moved after nets retrieval/configuration: previously this ran
        # before self.nets was populated, so ips.sh was always empty)
        ips_sh = '%s/%s' % (self.dir, IPS_SH)
        with open(ips_sh, 'w') as outfile:
            # Create header
            ips = "declare -A IPS=("
            # Iterate over ips
            for net in self.nets:
                # Add the ips one by one (strip the prefix length)
                ip = net['ip'].split('/')[0]
                ips = ips + '[%s]=%s ' % (net['intf'], ip)
            if self.nets != []:
                # Eliminate last character (trailing space)
                ips = ips[:-1] + ")\n"
            else:
                ips = ips + ")\n"
            # Write on the file
            outfile.write(ips)
        # Add python path to PATH environment variable
        # This solves the issue of python commands executed
        # outside the virtual environment
        self.exec_cmd('export PATH=%s:$PATH' % os.path.dirname(PYTHON_PATH))
        self.exec_cmd('export SCREENDIR=/run/screen/S-%s' % self.name)
        # Run scripts
        scripts = ''
        for script in kwargs.get('scripts', []):
            # Change directory to the host dir
            self.exec_cmd('cd %s' % self.dir)
            # Get full path
            script_path = os.path.abspath(os.path.join('scripts', script))
            # Append the script to the scripts
            scripts += script_path + ' & '
        if scripts != '':
            # This line forces screen to keep opened
            # after the scripts termination
            scripts = scripts[:-3] + '; exec bash'
            # Execute the scripts
            self.exec_cmd("screen -dmS %s bash -c '%s'" % (self.name, scripts))

    # Terminate node
    def terminate(self):
        """Stop the node's screen sessions and terminate the Mininet host."""
        # Stop screen session
        #self.exec_cmd('screen -XS %s quit' % self.name)
        self.exec_cmd("for session in $(screen -ls | grep -o '[0-9]*\.%s'); do screen -S ${session} -X quit; done" % self.name)
        Host.terminate(self)

    # Clean up the environment
    def cleanup(self):
        """Clean up the Mininet host and remove the scratch directory."""
        Host.cleanup(self)
        # Rm dir
        if os.path.exists(self.dir):
            shutil.rmtree(self.dir)
# Abstraction to model a SRv6Controller
class SRv6Controller(MHost):
# Config hook
def config(self, **kwargs):
MHost.config(self, **kwargs)
# Abstraction to model a SRv6Firewall
class WANRouter(MHost):
# Config hook
def config(self, **kwargs):
MHost.config(self, **kwargs)
# Enable IPv6 forwarding
self.exec_cmd("sysctl -w net.ipv6.conf.all.forwarding=1")
# Enable IPv4 forwarding
self.exec_cmd("sysctl -w net.ipv4.conf.all.forwarding=1")
| 44.59125
| 127
| 0.491128
| 4,096
| 35,673
| 4.214844
| 0.101074
| 0.039388
| 0.054159
| 0.019694
| 0.826054
| 0.810704
| 0.798193
| 0.78377
| 0.777804
| 0.764365
| 0
| 0.014305
| 0.39251
| 35,673
| 799
| 128
| 44.647059
| 0.782336
| 0.198974
| 0
| 0.799615
| 0
| 0.019268
| 0.207744
| 0.025115
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032755
| false
| 0.023121
| 0.013487
| 0
| 0.055877
| 0.001927
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d34b6cf97e4453aca00c87921d4709acdc7114eb
| 152
|
py
|
Python
|
netrino/models/__init__.py
|
HieronymusCrouse/netrino
|
dcecbfa8a4e5f79c6f74fb1a711698e384feee7e
|
[
"BSD-3-Clause"
] | 2
|
2018-05-17T15:50:54.000Z
|
2019-05-16T16:57:30.000Z
|
netrino/models/__init__.py
|
HieronymusCrouse/netrino
|
dcecbfa8a4e5f79c6f74fb1a711698e384feee7e
|
[
"BSD-3-Clause"
] | 46
|
2018-04-18T08:37:50.000Z
|
2019-06-06T13:15:13.000Z
|
netrino/models/__init__.py
|
HieronymusCrouse/netrino
|
dcecbfa8a4e5f79c6f74fb1a711698e384feee7e
|
[
"BSD-3-Clause"
] | 8
|
2018-02-26T08:16:24.000Z
|
2019-12-27T12:11:05.000Z
|
import netrino.models.processes
import netrino.models.workflows
import netrino.models.products
import netrino.models.orders
import netrino.models.tasks
| 25.333333
| 31
| 0.868421
| 20
| 152
| 6.6
| 0.4
| 0.492424
| 0.719697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065789
| 152
| 5
| 32
| 30.4
| 0.929577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d39664cbf05170bd7fe6af082d49eb5e6c01e5e6
| 30,849
|
py
|
Python
|
etcmodel/models/openkp/generate_examples_lib_test.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-13T21:48:52.000Z
|
2022-03-13T21:48:52.000Z
|
etcmodel/models/openkp/generate_examples_lib_test.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | null | null | null |
etcmodel/models/openkp/generate_examples_lib_test.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-30T07:20:29.000Z
|
2022-03-30T07:20:29.000Z
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for generate_examples_lib."""
import json
import os
from absl.testing import absltest
from etcmodel.models import tokenization
from etcmodel.models.openkp import generate_examples_lib as lib
VOCAB_PATH = 'etcmodel/models/openkp/test_data/vocab.txt'

# Word-level "first occurrence" fixtures: five repetitions of a 16-word
# window. In each window, position i normally maps to i; variants 1 and 2
# perturb a few positions of the first window to simulate repeated words.
_IDENTITY_WINDOW = list(range(16))

# Variant 1: in the first window, positions 7-9 point back to earlier
# occurrences (5, 6, then 9 onward unchanged).
LONG_WORD_FIRST_OCCURRENCE1 = (
    [0, 1, 2, 3, 4, 5, 6, 5, 6, 9, 10, 11, 12, 13, 14, 15]
    + _IDENTITY_WINDOW * 4
)

# Variant 2: same as variant 1, plus position 12 pointing back to 11.
LONG_WORD_FIRST_OCCURRENCE2 = (
    [0, 1, 2, 3, 4, 5, 6, 5, 6, 9, 10, 11, 11, 13, 14, 15]
    + _IDENTITY_WINDOW * 4
)

# Variant 3: no repeated words; every window is the identity mapping.
LONG_WORD_FIRST_OCCURRENCE3 = _IDENTITY_WINDOW * 5
class GenerateExamplesLibTest(absltest.TestCase):
def test_open_kp_example_from_json(self):
example_json = r"""
{
"url": "http://0123putlocker.com/watch/qd7kBodK-star-trek-discovery-season-1.html",
"text": "Star Trek Discovery Season 1 Director",
"VDOM": "[{\"Id\":0,\"text\":\"Star Trek Discovery Season 1\",\"feature\":[44.0,728.0,78.0,45.0,1.0,0.0,1.0,0.0,20.0,0.0,44.0,728.0,78.0,45.0,1.0,0.0,1.0,0.0,20.0,0.0],\"start_idx\":0,\"end_idx\":5},{\"Id\":0,\"text\":\"Director\",\"feature\":[208.0,49.0,138.0,15.0,0.0,0.0,0.0,0.0,12.0,1.0,198.0,564.0,138.0,15.0,1.0,0.0,0.0,0.0,12.0,1.0],\"start_idx\":5,\"end_idx\":6}]",
"KeyPhrases": [
[
"Star",
"Trek"
],
[
"Jason",
"Isaacs"
],
[
"Doug",
"Jones"
]
]
}
"""
example = lib.OpenKpExample.from_json(example_json)
expected = lib.OpenKpExample(
url='http://0123putlocker.com/watch/qd7kBodK-star-trek-discovery-season-1.html',
text='Star Trek Discovery Season 1 Director',
vdom=[
lib.VdomElement(
id=0,
text='Star Trek Discovery Season 1',
features=lib.VdomFeatures(
x_coord=44.0,
width=728.0,
y_coord=78.0,
height=45.0,
is_block=True,
is_inline=False,
is_heading=True,
is_leaf=False,
font_size=20,
is_bold=False),
parent_features=lib.VdomFeatures(
x_coord=44.0,
width=728.0,
y_coord=78.0,
height=45.0,
is_block=True,
is_inline=False,
is_heading=True,
is_leaf=False,
font_size=20,
is_bold=False),
start_idx=0,
end_idx=5),
lib.VdomElement(
id=0,
text='Director',
features=lib.VdomFeatures(
x_coord=208.0,
width=49.0,
y_coord=138.0,
height=15.0,
is_block=False,
is_inline=False,
is_heading=False,
is_leaf=False,
font_size=12,
is_bold=True),
parent_features=lib.VdomFeatures(
x_coord=198.0,
width=564.0,
y_coord=138.0,
height=15.0,
is_block=True,
is_inline=False,
is_heading=False,
is_leaf=False,
font_size=12,
is_bold=True),
start_idx=5,
end_idx=6)
],
key_phrases=[
lib.KeyPhrase(['Star', 'Trek']),
lib.KeyPhrase(['Jason', 'Isaacs']),
lib.KeyPhrase(['Doug', 'Jones']),
])
self.assertEqual(expected, example)
def test_open_kp_example_from_json_unlabeled(self):
example_json = r"""
{
"url": "http://0123putlocker.com/watch/qd7kBodK-star-trek-discovery-season-1.html",
"text": "Star Trek Discovery Season 1 Director",
"VDOM": "[{\"Id\":0,\"text\":\"Star Trek Discovery Season 1\",\"feature\":[44.0,728.0,78.0,45.0,1.0,0.0,1.0,0.0,20.0,0.0,44.0,728.0,78.0,45.0,1.0,0.0,1.0,0.0,20.0,0.0],\"start_idx\":0,\"end_idx\":5},{\"Id\":0,\"text\":\"Director\",\"feature\":[208.0,49.0,138.0,15.0,0.0,0.0,0.0,0.0,12.0,1.0,198.0,564.0,138.0,15.0,1.0,0.0,0.0,0.0,12.0,1.0],\"start_idx\":5,\"end_idx\":6}]"
}
"""
example = lib.OpenKpExample.from_json(example_json)
expected = lib.OpenKpExample(
url='http://0123putlocker.com/watch/qd7kBodK-star-trek-discovery-season-1.html',
text='Star Trek Discovery Season 1 Director',
vdom=[
lib.VdomElement(
id=0,
text='Star Trek Discovery Season 1',
features=lib.VdomFeatures(
x_coord=44.0,
width=728.0,
y_coord=78.0,
height=45.0,
is_block=True,
is_inline=False,
is_heading=True,
is_leaf=False,
font_size=20,
is_bold=False),
parent_features=lib.VdomFeatures(
x_coord=44.0,
width=728.0,
y_coord=78.0,
height=45.0,
is_block=True,
is_inline=False,
is_heading=True,
is_leaf=False,
font_size=20,
is_bold=False),
start_idx=0,
end_idx=5),
lib.VdomElement(
id=0,
text='Director',
features=lib.VdomFeatures(
x_coord=208.0,
width=49.0,
y_coord=138.0,
height=15.0,
is_block=False,
is_inline=False,
is_heading=False,
is_leaf=False,
font_size=12,
is_bold=True),
parent_features=lib.VdomFeatures(
x_coord=198.0,
width=564.0,
y_coord=138.0,
height=15.0,
is_block=True,
is_inline=False,
is_heading=False,
is_leaf=False,
font_size=12,
is_bold=True),
start_idx=5,
end_idx=6)
],
key_phrases=None)
self.assertEqual(expected, example)
def test_vdom_element_from_dict(self):
vdom_element_json = """
{
"Id": 0,
"text": "Director",
"feature": [
208.0,
49.0,
138.0,
15.0,
0.0,
0.0,
0.0,
0.0,
12.0,
1.0,
198.0,
564.0,
138.0,
15.0,
1.0,
0.0,
0.0,
0.0,
12.0,
1.0
],
"start_idx": 5,
"end_idx": 6
}
"""
vdom_element = lib.VdomElement.from_dict(json.loads(vdom_element_json))
expected = lib.VdomElement(
id=0,
text='Director',
features=lib.VdomFeatures(
x_coord=208.0,
width=49.0,
y_coord=138.0,
height=15.0,
is_block=False,
is_inline=False,
is_heading=False,
is_leaf=False,
font_size=12,
is_bold=True),
parent_features=lib.VdomFeatures(
x_coord=198.0,
width=564.0,
y_coord=138.0,
height=15.0,
is_block=True,
is_inline=False,
is_heading=False,
is_leaf=False,
font_size=12,
is_bold=True),
start_idx=5,
end_idx=6)
self.assertEqual(expected, vdom_element)
def test_font_size_cutoffs(self):
  """Checks structural invariants of the font-size cutoff table."""
  cutoffs = lib._FONT_SIZE_CUTOFFS
  # No duplicate cutoff values...
  self.assertEqual(len(cutoffs), len(set(cutoffs)))
  # ...and the table equals its own sorted order (i.e. it is increasing).
  self.assertEqual(cutoffs, sorted(cutoffs))
def test_font_size_to_font_id(self):
  """Spot-checks the font-size -> bucketed font-id mapping."""
  # (font_size, expected_font_id) pairs around the bucket boundaries.
  cases = [
      (0, 0), (1, 1), (8, 1), (9, 2), (24, 17), (25, 18), (29, 18),
      (30, 19), (34, 19), (35, 20), (39, 20), (44, 21), (49, 22),
      (50, 23), (100, 23),
  ]
  for font_size, font_id in cases:
    self.assertEqual(font_id, lib.font_size_to_font_id(font_size))
  # The vocabulary covers ids 0..23 inclusive.
  self.assertEqual(24, lib.FONT_ID_VOCAB_SIZE)
def test_etc_features_from_open_kp_example(self):
  """Featurizes a two-element example and checks every output field.

  The text repeats 'Jason Isaacs', so this also covers repeated-wordpiece
  ids in long_token_ids.  NOTE(review): depends on
  LONG_WORD_FIRST_OCCURRENCE1 and VOCAB_PATH defined elsewhere in this
  file — confirm they match the test BERT vocab.
  """
  example = lib.OpenKpExample(
      url='http://0123putlocker.com/watch/qd7kBodK-star-trek-discovery-season-1.html',
      text='Star Trek Discovery Season 1 Jason Isaacs Jason Isaacs and Doug',
      vdom=[
          lib.VdomElement(
              id=0,
              text='Star Trek Discovery Season 1 Jason',
              features=lib.VdomFeatures(
                  x_coord=44.0,
                  width=728.0,
                  y_coord=78.0,
                  height=45.0,
                  is_block=True,
                  is_inline=False,
                  is_heading=True,
                  is_leaf=False,
                  font_size=20,
                  is_bold=False),
              parent_features=lib.VdomFeatures(
                  x_coord=44.0,
                  width=728.0,
                  y_coord=78.0,
                  height=45.0,
                  is_block=True,
                  is_inline=False,
                  is_heading=True,
                  is_leaf=False,
                  font_size=20,
                  is_bold=False),
              start_idx=0,
              end_idx=6),
          lib.VdomElement(
              id=0,
              text='Isaacs Jason Isaacs and Doug',
              features=lib.VdomFeatures(
                  x_coord=208.0,
                  width=49.0,
                  y_coord=138.0,
                  height=15.0,
                  is_block=False,
                  is_inline=False,
                  is_heading=False,
                  is_leaf=False,
                  font_size=12,
                  is_bold=True),
              parent_features=lib.VdomFeatures(
                  x_coord=198.0,
                  width=564.0,
                  y_coord=138.0,
                  height=15.0,
                  is_block=True,
                  is_inline=False,
                  is_heading=False,
                  is_leaf=False,
                  font_size=12,
                  is_bold=True),
              start_idx=6,
              end_idx=11)
      ],
      key_phrases=[
          lib.KeyPhrase(['Star', 'Trek']),
          lib.KeyPhrase(['Jason', 'Isaacs'])
      ])
  bert_vocab_path = os.path.join(absltest.get_default_test_srcdir(),
                                 VOCAB_PATH)
  config = lib.EtcFeaturizationConfig(
      long_max_length=16,
      global_max_length=4,
      url_max_code_points=80,
      bert_vocab_path=bert_vocab_path,
      do_lower_case=True)
  tokenizer = tokenization.FullTokenizer(
      config.bert_vocab_path, do_lower_case=config.do_lower_case)
  etc_features = example.to_etc_features(tokenizer, config)
  expected = lib.OpenKpEtcFeatures(
      # Unicode code points of the URL, padded with -1 to 80 entries.
      url_code_points=[
          104, 116, 116, 112, 58, 47, 47, 48, 49, 50, 51, 112, 117, 116, 108,
          111, 99, 107, 101, 114, 46, 99, 111, 109, 47, 119, 97, 116, 99, 104,
          47, 113, 100, 55, 107, 66, 111, 100, 75, 45, 115, 116, 97, 114, 45,
          116, 114, 101, 107, 45, 100, 105, 115, 99, 111, 118, 101, 114, 121,
          45, 115, 101, 97, 115, 111, 110, 45, 49, 46, 104, 116, 109, 108, -1,
          -1, -1, -1, -1, -1, -1
      ],
      label_start_idx=[0, 7, -1],
      label_phrase_len=[2, 2, -1],
      long_token_ids=[3, 4, 5, 6, 7, 8, 9, 10, 8, 9, 10, 11, 12, 0, 0, 0],
      long_word_idx=[0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 0, 0, 0],
      long_vdom_idx=[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
      long_input_mask=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
      long_word_input_mask=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
      long_word_first_occurrence=LONG_WORD_FIRST_OCCURRENCE1,
      global_token_ids=[1, 1, 1, 1],
      global_input_mask=[1, 1, 0, 0],
      global_x_coords=[44.0, 208.0, 0.0, 0.0],
      global_y_coords=[78.0, 138.0, 0.0, 0.0],
      global_widths=[728.0, 49.0, 0.0, 0.0],
      global_heights=[45.0, 15.0, 0.0, 0.0],
      global_font_ids=[13, 5, 0, 0],
      global_block_indicator=[1, 0, 0, 0],
      global_inline_indicator=[0, 0, 0, 0],
      global_heading_indicator=[1, 0, 0, 0],
      global_leaf_indicator=[0, 0, 0, 0],
      global_bold_indicator=[0, 1, 0, 0],
      global_parent_x_coords=[44.0, 198.0, 0.0, 0.0],
      global_parent_y_coords=[78.0, 138.0, 0.0, 0.0],
      global_parent_widths=[728.0, 564.0, 0.0, 0.0],
      global_parent_heights=[45.0, 15.0, 0.0, 0.0],
      global_parent_font_ids=[13, 5, 0, 0],
      global_parent_heading_indicator=[1, 0, 0, 0],
      global_parent_leaf_indicator=[0, 0, 0, 0],
      global_parent_bold_indicator=[0, 1, 0, 0])
  self.assertEqual(expected, etc_features)
def test_etc_features_with_vdom_overflow(self):
  """Checks truncation when there are more VDOM elements than global slots.

  Twelve single-word 'foo' elements are appended after the first two, but
  global_max_length=4, so only the first four elements survive; the long
  input is likewise capped at long_max_length=16 tokens.
  """
  vdom = [
      lib.VdomElement(
          id=0,
          text='Star Trek Discovery Season 1 Jason',
          features=lib.VdomFeatures(
              x_coord=44.0,
              width=728.0,
              y_coord=78.0,
              height=45.0,
              is_block=True,
              is_inline=False,
              is_heading=True,
              is_leaf=False,
              font_size=20,
              is_bold=False),
          parent_features=lib.VdomFeatures(
              x_coord=44.0,
              width=728.0,
              y_coord=78.0,
              height=45.0,
              is_block=True,
              is_inline=False,
              is_heading=True,
              is_leaf=False,
              font_size=20,
              is_bold=False),
          start_idx=0,
          end_idx=5),
      lib.VdomElement(
          id=0,
          text='Isaacs Jason Isaacs and Doug',
          features=lib.VdomFeatures(
              x_coord=208.0,
              width=49.0,
              y_coord=138.0,
              height=15.0,
              is_block=False,
              is_inline=False,
              is_heading=False,
              is_leaf=False,
              font_size=12,
              is_bold=True),
          parent_features=lib.VdomFeatures(
              x_coord=198.0,
              width=564.0,
              y_coord=138.0,
              height=15.0,
              is_block=True,
              is_inline=False,
              is_heading=False,
              is_leaf=False,
              font_size=12,
              is_bold=True),
          start_idx=5,
          end_idx=8)
  ]
  text = 'Star Trek Discovery Season 1 Director Jason Isaacs'
  text += ' foo' * (20 - 8)
  # One single-word element per remaining word index (8..19).
  vdom.extend([
      lib.VdomElement(
          id=0,
          text='foo',
          features=lib.VdomFeatures(
              x_coord=208.0,
              width=49.0,
              y_coord=138.0,
              height=15.0,
              is_block=False,
              is_inline=False,
              is_heading=False,
              is_leaf=True,
              font_size=12,
              is_bold=True),
          parent_features=lib.VdomFeatures(
              x_coord=3110.0,
              width=92.0,
              y_coord=123.0,
              height=75.0,
              is_block=True,
              is_inline=False,
              is_heading=False,
              is_leaf=True,
              font_size=13,
              is_bold=True),
          start_idx=start_idx,
          end_idx=start_idx + 1) for start_idx in range(8, 20)
  ])
  example = lib.OpenKpExample(
      url='http://0123putlocker.com/watch/qd7kBodK-star-trek-discovery-season-1.html',
      text=text,
      vdom=vdom,
      key_phrases=[
          lib.KeyPhrase(['Star', 'Trek']),
          lib.KeyPhrase(['Jason', 'Isaacs']),
      ])
  bert_vocab_path = os.path.join(absltest.get_default_test_srcdir(),
                                 VOCAB_PATH)
  config = lib.EtcFeaturizationConfig(
      long_max_length=16,
      global_max_length=4,
      url_max_code_points=80,
      bert_vocab_path=bert_vocab_path,
      do_lower_case=True)
  tokenizer = tokenization.FullTokenizer(
      config.bert_vocab_path, do_lower_case=config.do_lower_case)
  etc_features = example.to_etc_features(tokenizer, config)
  expected = lib.OpenKpEtcFeatures(
      url_code_points=[
          104, 116, 116, 112, 58, 47, 47, 48, 49, 50, 51, 112, 117, 116, 108,
          111, 99, 107, 101, 114, 46, 99, 111, 109, 47, 119, 97, 116, 99, 104,
          47, 113, 100, 55, 107, 66, 111, 100, 75, 45, 115, 116, 97, 114, 45,
          116, 114, 101, 107, 45, 100, 105, 115, 99, 111, 118, 101, 114, 121,
          45, 115, 101, 97, 115, 111, 110, 45, 49, 46, 104, 116, 109, 108, -1,
          -1, -1, -1, -1, -1, -1
      ],
      label_start_idx=[0, 7, -1],
      label_phrase_len=[2, 2, -1],
      long_token_ids=[3, 4, 5, 6, 7, 8, 9, 10, 8, 9, 10, 11, 12, 13, 13, 0],
      long_word_idx=[0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11, 12, 0],
      long_vdom_idx=[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 3, 0],
      long_input_mask=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
      long_word_input_mask=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
      long_word_first_occurrence=LONG_WORD_FIRST_OCCURRENCE2,
      global_token_ids=[1, 1, 1, 1],
      global_input_mask=[1, 1, 1, 1],
      global_x_coords=[44.0, 208.0, 208.0, 208.0],
      global_y_coords=[78.0, 138.0, 138.0, 138.0],
      global_widths=[728.0, 49.0, 49.0, 49.0],
      global_heights=[45.0, 15.0, 15.0, 15.0],
      global_font_ids=[13, 5, 5, 5],
      global_block_indicator=[1, 0, 0, 0],
      global_inline_indicator=[0, 0, 0, 0],
      global_heading_indicator=[1, 0, 0, 0],
      global_leaf_indicator=[0, 0, 1, 1],
      global_bold_indicator=[0, 1, 1, 1],
      global_parent_x_coords=[44.0, 198.0, 3110.0, 3110.0],
      global_parent_y_coords=[78.0, 138.0, 123.0, 123.0],
      global_parent_widths=[728.0, 564.0, 92.0, 92.0],
      global_parent_heights=[45.0, 15.0, 75.0, 75.0],
      global_parent_font_ids=[13, 5, 6, 6],
      global_parent_heading_indicator=[1, 0, 0, 0],
      global_parent_leaf_indicator=[0, 0, 1, 1],
      global_parent_bold_indicator=[0, 1, 1, 1])
  self.assertEqual(expected, etc_features)
def test_etc_features_with_long_overflow(self):
  """Checks truncation when the long input overflows long_max_length.

  The second VDOM element carries 99 repeated 'star' words, far more
  than fits; the expectation is that only the first element's tokens
  survive and the second element contributes nothing (its global slot
  is masked out as well).
  """
  text = 'Star Wars and not Trek ' + ' '.join(['star'] * 12)
  vdom = [
      lib.VdomElement(
          id=0,
          text='Star Wars and not Trek',
          features=lib.VdomFeatures(
              x_coord=44.0,
              width=728.0,
              y_coord=78.0,
              height=45.0,
              is_block=True,
              is_inline=False,
              is_heading=True,
              is_leaf=False,
              font_size=20,
              is_bold=False),
          parent_features=lib.VdomFeatures(
              x_coord=44.0,
              width=728.0,
              y_coord=78.0,
              height=45.0,
              is_block=True,
              is_inline=False,
              is_heading=True,
              is_leaf=False,
              font_size=20,
              is_bold=False),
          start_idx=0,
          end_idx=5),
      lib.VdomElement(
          id=0,
          text=' '.join(['star'] * 99),
          features=lib.VdomFeatures(
              x_coord=44.0,
              width=728.0,
              y_coord=78.0,
              height=45.0,
              is_block=True,
              is_inline=False,
              is_heading=True,
              is_leaf=False,
              font_size=20,
              is_bold=False),
          parent_features=lib.VdomFeatures(
              x_coord=44.0,
              width=728.0,
              y_coord=78.0,
              height=45.0,
              is_block=True,
              is_inline=False,
              is_heading=True,
              is_leaf=False,
              font_size=20,
              is_bold=False),
          start_idx=5,
          end_idx=17)
  ]
  example = lib.OpenKpExample(
      url='http://0123putlocker.com/watch/qd7kBodK-star-trek-discovery-season-1.html',
      text=text,
      vdom=vdom,
      key_phrases=[
          lib.KeyPhrase(['Star', 'Wars']),
          lib.KeyPhrase(['Trek']),
      ])
  bert_vocab_path = os.path.join(absltest.get_default_test_srcdir(),
                                 VOCAB_PATH)
  config = lib.EtcFeaturizationConfig(
      long_max_length=16,
      global_max_length=4,
      url_max_code_points=80,
      bert_vocab_path=bert_vocab_path,
      do_lower_case=True)
  tokenizer = tokenization.FullTokenizer(
      config.bert_vocab_path, do_lower_case=config.do_lower_case)
  etc_features = example.to_etc_features(tokenizer, config)
  expected = lib.OpenKpEtcFeatures(
      url_code_points=[
          104, 116, 116, 112, 58, 47, 47, 48, 49, 50, 51, 112, 117, 116, 108,
          111, 99, 107, 101, 114, 46, 99, 111, 109, 47, 119, 97, 116, 99, 104,
          47, 113, 100, 55, 107, 66, 111, 100, 75, 45, 115, 116, 97, 114, 45,
          116, 114, 101, 107, 45, 100, 105, 115, 99, 111, 118, 101, 114, 121,
          45, 115, 101, 97, 115, 111, 110, 45, 49, 46, 104, 116, 109, 108, -1,
          -1, -1, -1, -1, -1, -1
      ],
      label_start_idx=[0, 4, -1],
      label_phrase_len=[2, 1, -1],
      long_token_ids=[3, 14, 11, 15, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
      long_word_idx=[0, 1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
      long_vdom_idx=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
      long_input_mask=[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
      long_word_input_mask=[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
      long_word_first_occurrence=LONG_WORD_FIRST_OCCURRENCE3,
      global_token_ids=[1, 1, 1, 1],
      global_input_mask=[1, 0, 0, 0],
      global_x_coords=[44.0, 0, 0, 0],
      global_y_coords=[78.0, 0, 0, 0],
      global_widths=[728.0, 0, 0, 0],
      global_heights=[45.0, 0, 0, 0],
      global_font_ids=[13, 0, 0, 0],
      global_block_indicator=[1, 0, 0, 0],
      global_inline_indicator=[0, 0, 0, 0],
      global_heading_indicator=[1, 0, 0, 0],
      global_leaf_indicator=[0, 0, 0, 0],
      global_bold_indicator=[0, 0, 0, 0],
      global_parent_x_coords=[44.0, 0, 0, 0],
      global_parent_y_coords=[78.0, 0, 0, 0],
      global_parent_widths=[728.0, 0, 0, 0],
      global_parent_heights=[45.0, 0, 0, 0],
      global_parent_font_ids=[13, 0, 0, 0],
      global_parent_heading_indicator=[1, 0, 0, 0],
      global_parent_leaf_indicator=[0, 0, 0, 0],
      global_parent_bold_indicator=[0, 0, 0, 0])
  self.assertEqual(expected, etc_features)
def test_etc_features_fixed_global_blocks(self):
  """Featurization with fixed_block_len set (VDOM structure ignored).

  With fixed_block_len=4, long tokens are grouped into fixed-size blocks
  of 4 instead of per-VDOM-element groups, and all per-element global
  feature lists come back empty.
  """
  example = lib.OpenKpExample(
      url='http://0123putlocker.com/watch/qd7kBodK-star-trek-discovery-season-1.html',
      text='Star Trek Discovery Season 1 Jason Isaacs Jason Isaacs and Doug',
      vdom=[
          lib.VdomElement(
              id=0,
              text='Star Trek Discovery Season 1 Jason',
              features=lib.VdomFeatures(
                  x_coord=44.0,
                  width=728.0,
                  y_coord=78.0,
                  height=45.0,
                  is_block=True,
                  is_inline=False,
                  is_heading=True,
                  is_leaf=False,
                  font_size=20,
                  is_bold=False),
              parent_features=lib.VdomFeatures(
                  x_coord=44.0,
                  width=728.0,
                  y_coord=78.0,
                  height=45.0,
                  is_block=True,
                  is_inline=False,
                  is_heading=True,
                  is_leaf=False,
                  font_size=20,
                  is_bold=False),
              start_idx=0,
              end_idx=6),
          lib.VdomElement(
              id=0,
              text='Isaacs Jason Isaacs and Doug',
              features=lib.VdomFeatures(
                  x_coord=208.0,
                  width=49.0,
                  y_coord=138.0,
                  height=15.0,
                  is_block=False,
                  is_inline=False,
                  is_heading=False,
                  is_leaf=False,
                  font_size=12,
                  is_bold=True),
              parent_features=lib.VdomFeatures(
                  x_coord=198.0,
                  width=564.0,
                  y_coord=138.0,
                  height=15.0,
                  is_block=True,
                  is_inline=False,
                  is_heading=False,
                  is_leaf=False,
                  font_size=12,
                  is_bold=True),
              start_idx=6,
              end_idx=11)
      ],
      key_phrases=[
          lib.KeyPhrase(['Star', 'Trek']),
          lib.KeyPhrase(['Jason', 'Isaacs'])
      ])
  bert_vocab_path = os.path.join(absltest.get_default_test_srcdir(),
                                 VOCAB_PATH)
  config = lib.EtcFeaturizationConfig(
      long_max_length=16,
      global_max_length=4,
      url_max_code_points=80,
      bert_vocab_path=bert_vocab_path,
      do_lower_case=True,
      fixed_block_len=4)
  tokenizer = tokenization.FullTokenizer(
      config.bert_vocab_path, do_lower_case=config.do_lower_case)
  etc_features = example.to_etc_features(tokenizer, config)
  expected = lib.OpenKpEtcFeatures(
      url_code_points=[
          104, 116, 116, 112, 58, 47, 47, 48, 49, 50, 51, 112, 117, 116, 108,
          111, 99, 107, 101, 114, 46, 99, 111, 109, 47, 119, 97, 116, 99, 104,
          47, 113, 100, 55, 107, 66, 111, 100, 75, 45, 115, 116, 97, 114, 45,
          116, 114, 101, 107, 45, 100, 105, 115, 99, 111, 118, 101, 114, 121,
          45, 115, 101, 97, 115, 111, 110, 45, 49, 46, 104, 116, 109, 108, -1,
          -1, -1, -1, -1, -1, -1
      ],
      label_start_idx=[5, 0, -1],
      label_phrase_len=[2, 2, -1],
      long_token_ids=[3, 4, 5, 6, 7, 8, 9, 10, 8, 9, 10, 11, 12, 0, 0, 0],
      long_word_idx=[0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 0, 0, 0],
      long_vdom_idx=[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 0, 0, 0],
      long_input_mask=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
      long_word_input_mask=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
      global_token_ids=[1, 1, 1, 1],
      global_input_mask=[1, 1, 1, 1],
      global_x_coords=[],
      global_y_coords=[],
      global_widths=[],
      global_heights=[],
      global_font_ids=[],
      global_block_indicator=[],
      global_inline_indicator=[],
      global_heading_indicator=[],
      global_leaf_indicator=[],
      global_bold_indicator=[],
      global_parent_x_coords=[],
      global_parent_y_coords=[],
      global_parent_widths=[],
      global_parent_heights=[],
      global_parent_font_ids=[],
      global_parent_heading_indicator=[],
      global_parent_leaf_indicator=[],
      global_parent_bold_indicator=[])
  self.assertEqual(expected, etc_features)
if __name__ == '__main__':
  # Run all test methods in this module via absl's test runner.
  absltest.main()
| 29.921435
| 379
| 0.480048
| 4,094
| 30,849
| 3.416952
| 0.066683
| 0.039174
| 0.04139
| 0.034027
| 0.857102
| 0.846165
| 0.827293
| 0.809779
| 0.791693
| 0.753378
| 0
| 0.151801
| 0.395248
| 30,849
| 1,030
| 380
| 29.950485
| 0.598038
| 0.021816
| 0
| 0.819388
| 0
| 0.010204
| 0.094445
| 0.024042
| 0.00102
| 0
| 0
| 0
| 0.02551
| 1
| 0.009184
| false
| 0
| 0.005102
| 0
| 0.015306
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6c9bba051cc99dc5d55ec5d9e24aba9b4671727e
| 212
|
py
|
Python
|
project/frontend/views.py
|
Rampo0/django_react
|
c6bfbf642d22ae81251b88448a6f349f2b3c768a
|
[
"MIT"
] | 2
|
2019-11-30T14:31:37.000Z
|
2020-01-05T08:37:15.000Z
|
project/frontend/views.py
|
Rampo0/django_react
|
c6bfbf642d22ae81251b88448a6f349f2b3c768a
|
[
"MIT"
] | null | null | null |
project/frontend/views.py
|
Rampo0/django_react
|
c6bfbf642d22ae81251b88448a6f349f2b3c768a
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
def index(request):
    """Serve the frontend entry-point template."""
    template_name = 'frontend/index.html'
    return render(request, template_name, {})
def error_404_view(request, exception):
    """Custom 404 handler; renders the frontend's error page.

    `exception` is required by Django's handler404 signature but unused.
    """
    template_name = 'frontend/error_404.html'
    return render(request, template_name, {})
| 30.285714
| 58
| 0.731132
| 27
| 212
| 5.62963
| 0.555556
| 0.157895
| 0.25
| 0.355263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032787
| 0.136792
| 212
| 7
| 58
| 30.285714
| 0.797814
| 0
| 0
| 0
| 0
| 0
| 0.197183
| 0.107981
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
6c9bdb3851c2e61d0a1bd789791220b6d5e8af48
| 34,224
|
py
|
Python
|
Updated Code/cnn_class.py
|
Bharathgc/Evaluating-Fusion-points-for-multi-stream-networks-handling-cross-modal-data
|
116fe18e1fcc4749b1454003cb774fdd2319ebb3
|
[
"MIT"
] | 7
|
2018-11-22T07:56:47.000Z
|
2022-03-24T02:24:10.000Z
|
Updated Code/cnn_class.py
|
Bharathgc/Evaluating-Fusion-points-for-multi-stream-networks-handling-cross-modal-data
|
116fe18e1fcc4749b1454003cb774fdd2319ebb3
|
[
"MIT"
] | null | null | null |
Updated Code/cnn_class.py
|
Bharathgc/Evaluating-Fusion-points-for-multi-stream-networks-handling-cross-modal-data
|
116fe18e1fcc4749b1454003cb774fdd2319ebb3
|
[
"MIT"
] | 3
|
2019-07-13T15:15:49.000Z
|
2021-05-24T08:21:40.000Z
|
import tensorflow as tf
import numpy as np
class CNN(object):
def __init__(self, num_classes, keep_prob ):
super(CNN, self).__init__()
#self.NUM_SAMPLES = num_samples
#self.WIDTH = width
#self.HEIGHT = height
self.NUM_CLASSES = num_classes
self.KEEP_PROB = keep_prob
def conv_layer_relu(self, x, weights, biases, stride, name, relu='TRUE', padding='SAME'):
    """2-D convolution with bias and optional ReLU, under its own scope.

    Args:
        x: input tensor in NHWC layout.
        weights: shape of the filter variable, [h, w, in_ch, out_ch].
        biases: shape of the bias variable, [out_ch].
        stride: conv2d strides, e.g. [1, s, s, 1].
        name: variable-scope name; also used to name the ops.
        relu: 'TRUE' (default) to apply ReLU; 'FALSE' to skip it.
            Booleans True/False are accepted as well.
        padding: conv2d padding, 'SAME' (default) or 'VALID'.

    Returns:
        The conv output tensor with bias added and, unless disabled,
        ReLU applied.
    """
    with tf.variable_scope(name) as scope:
        kernel = tf.get_variable(
            "weights", weights, initializer=tf.truncated_normal_initializer())
        bias_var = tf.get_variable(
            "biases", biases, initializer=tf.truncated_normal_initializer())
        conv = tf.nn.conv2d(x, kernel, strides=stride, padding=padding,
                            name=scope.name)
        # Bug fix: the original added the bias only inside the ReLU branch,
        # so relu='FALSE' silently dropped the bias (fc_relu always adds it).
        conv = tf.add(conv, bias_var)
        if relu == 'TRUE' or relu is True:
            conv = tf.nn.relu(conv, name=scope.name + "_relu")
        return conv
def maxpool(self, x, filter_size, stride, name):
    """Max-pools `x` with the given window and stride (VALID padding)."""
    return tf.nn.max_pool(
        x, ksize=filter_size, strides=stride, padding='VALID', name=name)
def fc_relu(self, x, weights, biases, name, relu='TRUE'):
    """Fully connected layer (x @ W + b) with optional ReLU.

    Args:
        x: 2-D input tensor [batch, in_dim].
        weights: shape of the weight variable, [in_dim, out_dim].
        biases: shape of the bias variable, [out_dim].
        name: variable-scope name; also used to name the ops.
        relu: 'TRUE' (default) to apply ReLU; 'FALSE' to skip it.
            Booleans True/False are accepted as well (generalization;
            existing string-valued callers are unaffected).

    Returns:
        The layer output tensor.
    """
    with tf.variable_scope(name) as scope:
        weight_var = tf.get_variable(
            "weights", weights, initializer=tf.truncated_normal_initializer())
        bias_var = tf.get_variable(
            "biases", biases, initializer=tf.truncated_normal_initializer())
        fc = tf.add(tf.matmul(x, weight_var), bias_var, name=scope.name)
        if relu == 'TRUE' or relu is True:
            fc = tf.nn.relu(fc, name=scope.name + "_relu")
        return fc
def dropout(self, x, name):
    """Applies dropout with the keep probability configured at init."""
    return tf.nn.dropout(x, self.KEEP_PROB, name=name)
def alex_net(self, x):
    """Single-stream AlexNet graph: 5 conv + 3 FC layers.

    Args:
        x: flattened input batch; reshaped to [-1, 224, 224, 3].

    Returns:
        Logits of shape [batch, NUM_CLASSES] (no final activation).
    """
    # reshaping into 4d tensor
    x = tf.reshape(x, [-1, 224, 224, 3])
    # conv1 layer with relu
    conv1 = self.conv_layer_relu(x, [11, 11, 3, 96], [96], [1, 4, 4, 1], "alex_conv1")
    # maxpool_1
    pool1 = self.maxpool(conv1, [1, 3, 3, 1], [1, 2, 2, 1], "alex_pool1")
    # normalization layer after conv1
    norm1 = tf.nn.local_response_normalization(pool1, name="alex_norm1")
    # conv2 layer with relu
    conv2 = self.conv_layer_relu(norm1, [5, 5, 96, 256], [256], [1, 1, 1, 1], "alex_conv2")
    # maxpool_2
    pool2 = self.maxpool(conv2, [1, 3, 3, 1], [1, 2, 2, 1], "alex_pool2")
    # normalization after conv2
    norm2 = tf.nn.local_response_normalization(pool2, name="alex_norm2")
    # conv3 layer with relu
    conv3 = self.conv_layer_relu(norm2, [3, 3, 256, 384], [384], [1, 1, 1, 1], "alex_conv3")
    # conv4 layer with relu
    conv4 = self.conv_layer_relu(conv3, [3, 3, 384, 384], [384], [1, 1, 1, 1], "alex_conv4")
    # conv5 layer with relu
    conv5 = self.conv_layer_relu(conv4, [3, 3, 384, 256], [256], [1, 1, 1, 1], "alex_conv5")
    # maxpool_2 after conv5
    pool3 = self.maxpool(conv5, [1, 3, 3, 1], [1, 2, 2, 1], "alex_pool3")
    # stretching data into array for fc layers (assumes 6x6x256 activations
    # at this point for 224x224 input — TODO confirm for other sizes)
    x2 = tf.reshape(pool3, [-1, 6 * 6 * 256])
    # fc6 with relu
    fc6 = self.fc_relu(x2, [6 * 6 * 256, 4096], [4096], "alex_fc6")
    # dropout for fc6
    dropout_fc6 = self.dropout(fc6, "alex_drop_fc6")
    # fc7 with relu
    fc7 = self.fc_relu(dropout_fc6, [4096, 4096], [4096], "alex_fc7")
    # dropout for fc7
    dropout_fc7 = self.dropout(fc7, "alex_drop_fc7")
    # fc8 or output WITHOUT acivation and dropout
    out = self.fc_relu(dropout_fc7, [4096, self.NUM_CLASSES], [self.NUM_CLASSES], "alex_out", relu='FALSE')
    return out
def alexnet_stream_2(self, x, scope_name):
    """Top of AlexNet (through norm2), built under `scope_name`.

    Used as the per-input "stream" for two-stream fusion at the norm2
    activation (see alexnet_fused2).
    """
    with tf.variable_scope(scope_name):
        # reshaping into 4d tensor
        x = tf.reshape(x, [-1, 224, 224, 3])
        # conv1 layer with relu
        conv1 = self.conv_layer_relu(x, [11, 11, 3, 96], [96], [1, 4, 4, 1], "alex_conv1")
        # maxpool_1
        pool1 = self.maxpool(conv1, [1, 3, 3, 1], [1, 2, 2, 1], "alex_pool1")
        # normalization layer after conv1
        norm1 = tf.nn.local_response_normalization(pool1, name="alex_norm1")
        # conv2 layer with relu
        conv2 = self.conv_layer_relu(norm1, [5, 5, 96, 256], [256], [1, 1, 1, 1], "alex_conv2")
        # maxpool_2
        pool2 = self.maxpool(conv2, [1, 3, 3, 1], [1, 2, 2, 1], "alex_pool2")
        # normalization after conv2
        norm2 = tf.nn.local_response_normalization(pool2, name="alex_norm2")
        return norm2
def alexnet_bottom_2(self, norm2):
    """Rest of AlexNet from the norm2 activation down to the logits.

    Shared "bottom" used after fusing two streams (see alexnet_fused2).
    Returns logits of shape [batch, NUM_CLASSES] with no activation.
    """
    # conv3 layer with relu
    conv3 = self.conv_layer_relu(norm2, [3, 3, 256, 384], [384], [1, 1, 1, 1], "alex_conv3")
    # conv4 layer with relu
    conv4 = self.conv_layer_relu(conv3, [3, 3, 384, 384], [384], [1, 1, 1, 1], "alex_conv4")
    # conv5 layer with relu
    conv5 = self.conv_layer_relu(conv4, [3, 3, 384, 256], [256], [1, 1, 1, 1], "alex_conv5")
    # maxpool_2 after conv5
    pool3 = self.maxpool(conv5, [1, 3, 3, 1], [1, 2, 2, 1], "alex_pool3")
    # stretching data into array for fc layers
    x2 = tf.reshape(pool3, [-1, 6 * 6 * 256])
    # fc6 with relu
    fc6 = self.fc_relu(x2, [6 * 6 * 256, 4096], [4096], "alex_fc6")
    # dropout for fc6
    dropout_fc6 = self.dropout(fc6, "alex_drop_fc6")
    # fc7 with relu
    fc7 = self.fc_relu(dropout_fc6, [4096, 4096], [4096], "alex_fc7")
    # dropout for fc7
    dropout_fc7 = self.dropout(fc7, "alex_drop_fc7")
    # fc8 or output WITHOUT acivation and dropout
    out = self.fc_relu(dropout_fc7, [4096, self.NUM_CLASSES], [self.NUM_CLASSES], "alex_out", relu='FALSE')
    return out
def alexnet_fused2(self, x_1, x_2):
    """Two-stream AlexNet fused after norm2.

    Each input runs through its own top stream (separate variable scopes
    'stream_1' / 'stream_2'); the activations are averaged element-wise
    and fed through a single shared bottom network.
    """
    top_a = self.alexnet_stream_2(x_1, 'stream_1')
    top_b = self.alexnet_stream_2(x_2, 'stream_2')
    # Fuse by averaging the two stream activations.
    fuse_point = tf.scalar_mul(1.0 / 2.0, tf.add(top_a, top_b))
    return self.alexnet_bottom_2(fuse_point)
def alexnet_stream_3(self, x, scope_name):
    """Top of AlexNet (through conv3), built under `scope_name`.

    Per-input "stream" for two-stream fusion at conv3 (alexnet_fused3).
    """
    with tf.variable_scope(scope_name):
        # reshaping into 4d tensor
        x = tf.reshape(x, [-1, 224, 224, 3])
        # conv1 layer with relu
        conv1 = self.conv_layer_relu(x, [11, 11, 3, 96], [96], [1, 4, 4, 1], "alex_conv1")
        # maxpool_1
        pool1 = self.maxpool(conv1, [1, 3, 3, 1], [1, 2, 2, 1], "alex_pool1")
        # normalization layer after conv1
        norm1 = tf.nn.local_response_normalization(pool1, name="alex_norm1")
        # conv2 layer with relu
        conv2 = self.conv_layer_relu(norm1, [5, 5, 96, 256], [256], [1, 1, 1, 1], "alex_conv2")
        # maxpool_2
        pool2 = self.maxpool(conv2, [1, 3, 3, 1], [1, 2, 2, 1], "alex_pool2")
        # normalization after conv2
        norm2 = tf.nn.local_response_normalization(pool2, name="alex_norm2")
        # conv3 layer with relu
        conv3 = self.conv_layer_relu(norm2, [3, 3, 256, 384], [384], [1, 1, 1, 1], "alex_conv3")
        return conv3
def alexnet_bottom_3(self, conv3):
    """Rest of AlexNet from the conv3 activation down to the logits.

    Shared "bottom" used after fusing two streams (see alexnet_fused3).
    Returns logits of shape [batch, NUM_CLASSES] with no activation.
    """
    # conv4 layer with relu
    conv4 = self.conv_layer_relu(conv3, [3, 3, 384, 384], [384], [1, 1, 1, 1], "alex_conv4")
    # conv5 layer with relu
    conv5 = self.conv_layer_relu(conv4, [3, 3, 384, 256], [256], [1, 1, 1, 1], "alex_conv5")
    # maxpool_2 after conv5
    pool3 = self.maxpool(conv5, [1, 3, 3, 1], [1, 2, 2, 1], "alex_pool3")
    # stretching data into array for fc layers
    x2 = tf.reshape(pool3, [-1, 6 * 6 * 256])
    # fc6 with relu
    fc6 = self.fc_relu(x2, [6 * 6 * 256, 4096], [4096], "alex_fc6")
    # dropout for fc6
    dropout_fc6 = self.dropout(fc6, "alex_drop_fc6")
    # fc7 with relu
    fc7 = self.fc_relu(dropout_fc6, [4096, 4096], [4096], "alex_fc7")
    # dropout for fc7
    dropout_fc7 = self.dropout(fc7, "alex_drop_fc7")
    # fc8 or output WITHOUT acivation and dropout
    out = self.fc_relu(dropout_fc7, [4096, self.NUM_CLASSES], [self.NUM_CLASSES], "alex_out", relu='FALSE')
    return out
def alexnet_fused3(self, x_1, x_2):
    """Two-stream AlexNet fused after conv3.

    Each input runs through its own top stream ('stream_1'/'stream_2'
    variable scopes); the activations are averaged and fed through one
    shared bottom network.
    """
    top_a = self.alexnet_stream_3(x_1, 'stream_1')
    top_b = self.alexnet_stream_3(x_2, 'stream_2')
    # Fuse by averaging the two stream activations.
    fuse_point = tf.scalar_mul(1.0 / 2.0, tf.add(top_a, top_b))
    return self.alexnet_bottom_3(fuse_point)
def alexnet_stream_4(self, x, scope_name):
    """Top of AlexNet (through conv4), built under `scope_name`.

    Per-input "stream" for two-stream fusion at conv4 (alexnet_fused4).
    """
    with tf.variable_scope(scope_name):
        # reshaping into 4d tensor
        x = tf.reshape(x, [-1, 224, 224, 3])
        # conv1 layer with relu
        conv1 = self.conv_layer_relu(x, [11, 11, 3, 96], [96], [1, 4, 4, 1], "alex_conv1")
        # maxpool_1
        pool1 = self.maxpool(conv1, [1, 3, 3, 1], [1, 2, 2, 1], "alex_pool1")
        # normalization layer after conv1
        norm1 = tf.nn.local_response_normalization(pool1, name="alex_norm1")
        # conv2 layer with relu
        conv2 = self.conv_layer_relu(norm1, [5, 5, 96, 256], [256], [1, 1, 1, 1], "alex_conv2")
        # maxpool_2
        pool2 = self.maxpool(conv2, [1, 3, 3, 1], [1, 2, 2, 1], "alex_pool2")
        # normalization after conv2
        norm2 = tf.nn.local_response_normalization(pool2, name="alex_norm2")
        # conv3 layer with relu
        conv3 = self.conv_layer_relu(norm2, [3, 3, 256, 384], [384], [1, 1, 1, 1], "alex_conv3")
        # conv4 layer with relu
        conv4 = self.conv_layer_relu(conv3, [3, 3, 384, 384], [384], [1, 1, 1, 1], "alex_conv4")
        return conv4
def alexnet_bottom_4(self, conv4):
    """Rest of AlexNet from the conv4 activation down to the logits.

    Shared "bottom" used after fusing two streams (see alexnet_fused4).
    Returns logits of shape [batch, NUM_CLASSES] with no activation.
    """
    # conv5 layer with relu
    conv5 = self.conv_layer_relu(conv4, [3, 3, 384, 256], [256], [1, 1, 1, 1], "alex_conv5")
    # maxpool_2 after conv5
    pool3 = self.maxpool(conv5, [1, 3, 3, 1], [1, 2, 2, 1], "alex_pool3")
    # stretching data into array for fc layers
    x2 = tf.reshape(pool3, [-1, 6 * 6 * 256])
    # fc6 with relu
    fc6 = self.fc_relu(x2, [6 * 6 * 256, 4096], [4096], "alex_fc6")
    # dropout for fc6
    dropout_fc6 = self.dropout(fc6, "alex_drop_fc6")
    # fc7 with relu
    fc7 = self.fc_relu(dropout_fc6, [4096, 4096], [4096], "alex_fc7")
    # dropout for fc7
    dropout_fc7 = self.dropout(fc7, "alex_drop_fc7")
    # fc8 or output WITHOUT acivation and dropout
    out = self.fc_relu(dropout_fc7, [4096, self.NUM_CLASSES], [self.NUM_CLASSES], "alex_out", relu='FALSE')
    return out
def alexnet_fused4(self, x_1, x_2):
    """Two-stream AlexNet fused after conv4.

    Each input runs through its own top stream ('stream_1'/'stream_2'
    variable scopes); the activations are averaged and fed through one
    shared bottom network.
    """
    top_a = self.alexnet_stream_4(x_1, 'stream_1')
    top_b = self.alexnet_stream_4(x_2, 'stream_2')
    # Fuse by averaging the two stream activations.
    fuse_point = tf.scalar_mul(1.0 / 2.0, tf.add(top_a, top_b))
    return self.alexnet_bottom_4(fuse_point)
def alexnet_stream_5(self, x, scope_name):
    """Top of AlexNet (through pool3 after conv5), under `scope_name`.

    Per-input "stream" for two-stream fusion at pool3 (alexnet_fused5).
    """
    with tf.variable_scope(scope_name):
        # reshaping into 4d tensor
        x = tf.reshape(x, [-1, 224, 224, 3])
        # conv1 layer with relu
        conv1 = self.conv_layer_relu(x, [11, 11, 3, 96], [96], [1, 4, 4, 1], "alex_conv1")
        # maxpool_1
        pool1 = self.maxpool(conv1, [1, 3, 3, 1], [1, 2, 2, 1], "alex_pool1")
        # normalization layer after conv1
        norm1 = tf.nn.local_response_normalization(pool1, name="alex_norm1")
        # conv2 layer with relu
        conv2 = self.conv_layer_relu(norm1, [5, 5, 96, 256], [256], [1, 1, 1, 1], "alex_conv2")
        # maxpool_2
        pool2 = self.maxpool(conv2, [1, 3, 3, 1], [1, 2, 2, 1], "alex_pool2")
        # normalization after conv2
        norm2 = tf.nn.local_response_normalization(pool2, name="alex_norm2")
        # conv3 layer with relu
        conv3 = self.conv_layer_relu(norm2, [3, 3, 256, 384], [384], [1, 1, 1, 1], "alex_conv3")
        # conv4 layer with relu
        conv4 = self.conv_layer_relu(conv3, [3, 3, 384, 384], [384], [1, 1, 1, 1], "alex_conv4")
        # conv5 layer with relu
        conv5 = self.conv_layer_relu(conv4, [3, 3, 384, 256], [256], [1, 1, 1, 1], "alex_conv5")
        # maxpool_2 after conv5
        pool3 = self.maxpool(conv5, [1, 3, 3, 1], [1, 2, 2, 1], "alex_pool3")
        return pool3
def alexnet_bottom_5(self, pool3):
    """Fully connected tail of AlexNet from pool3 to the logits.

    Shared "bottom" used after fusing two streams (see alexnet_fused5).
    Returns logits of shape [batch, NUM_CLASSES] with no activation.
    """
    # stretching data into array for fc layers
    x2 = tf.reshape(pool3, [-1, 6 * 6 * 256])
    # fc6 with relu
    fc6 = self.fc_relu(x2, [6 * 6 * 256, 4096], [4096], "alex_fc6")
    # dropout for fc6
    dropout_fc6 = self.dropout(fc6, "alex_drop_fc6")
    # fc7 with relu
    fc7 = self.fc_relu(dropout_fc6, [4096, 4096], [4096], "alex_fc7")
    # dropout for fc7
    dropout_fc7 = self.dropout(fc7, "alex_drop_fc7")
    # fc8 or output WITHOUT acivation and dropout
    out = self.fc_relu(dropout_fc7, [4096, self.NUM_CLASSES], [self.NUM_CLASSES], "alex_out", relu='FALSE')
    return out
def alexnet_fused5(self, x_1, x_2):
    """Two-stream AlexNet fused after pool3 (post-conv5 max pooling).

    Each input runs through its own top stream ('stream_1'/'stream_2'
    variable scopes); the activations are averaged and fed through one
    shared bottom network.
    """
    top_a = self.alexnet_stream_5(x_1, 'stream_1')
    top_b = self.alexnet_stream_5(x_2, 'stream_2')
    # Fuse by averaging the two stream activations.
    fuse_point = tf.scalar_mul(1.0 / 2.0, tf.add(top_a, top_b))
    return self.alexnet_bottom_5(fuse_point)
def alexnet_stream_6(self, x, scope_name):
    """Top of AlexNet (through fc6 + dropout), under `scope_name`.

    Per-input "stream" for two-stream fusion at the fc6 dropout output
    (alexnet_fused6).
    """
    with tf.variable_scope(scope_name):
        # reshaping into 4d tensor
        x = tf.reshape(x, [-1, 224, 224, 3])
        # conv1 layer with relu
        conv1 = self.conv_layer_relu(x, [11, 11, 3, 96], [96], [1, 4, 4, 1], "alex_conv1")
        # maxpool_1
        pool1 = self.maxpool(conv1, [1, 3, 3, 1], [1, 2, 2, 1], "alex_pool1")
        # normalization layer after conv1
        norm1 = tf.nn.local_response_normalization(pool1, name="alex_norm1")
        # conv2 layer with relu
        conv2 = self.conv_layer_relu(norm1, [5, 5, 96, 256], [256], [1, 1, 1, 1], "alex_conv2")
        # maxpool_2
        pool2 = self.maxpool(conv2, [1, 3, 3, 1], [1, 2, 2, 1], "alex_pool2")
        # normalization after conv2
        norm2 = tf.nn.local_response_normalization(pool2, name="alex_norm2")
        # conv3 layer with relu
        conv3 = self.conv_layer_relu(norm2, [3, 3, 256, 384], [384], [1, 1, 1, 1], "alex_conv3")
        # conv4 layer with relu
        conv4 = self.conv_layer_relu(conv3, [3, 3, 384, 384], [384], [1, 1, 1, 1], "alex_conv4")
        # conv5 layer with relu
        conv5 = self.conv_layer_relu(conv4, [3, 3, 384, 256], [256], [1, 1, 1, 1], "alex_conv5")
        # maxpool_2 after conv5
        pool3 = self.maxpool(conv5, [1, 3, 3, 1], [1, 2, 2, 1], "alex_pool3")
        # stretching data into array for fc layers
        x2 = tf.reshape(pool3, [-1, 6 * 6 * 256], name='alex_linear')
        # fc6 with relu
        fc6 = self.fc_relu(x2, [6 * 6 * 256, 4096], [4096], "alex_fc6")
        # dropout for fc6
        dropout_fc6 = self.dropout(fc6, "alex_drop_fc6")
        return dropout_fc6
def alexnet_bottom_6(self, dropout_fc6):
    """Final AlexNet layers from the fc6 dropout output to the logits.

    Shared "bottom" used after fusing two streams (see alexnet_fused6).
    Returns logits of shape [batch, NUM_CLASSES] with no activation.
    """
    # fc7 with relu
    fc7 = self.fc_relu(dropout_fc6, [4096, 4096], [4096], "alex_fc7")
    # dropout for fc7
    dropout_fc7 = self.dropout(fc7, "alex_drop_fc7")
    # fc8 or output WITHOUT acivation and dropout
    out = self.fc_relu(dropout_fc7, [4096, self.NUM_CLASSES], [self.NUM_CLASSES], "alex_out", relu='FALSE')
    return out
def alexnet_fused6(self, x_1, x_2):
    """Two-stream AlexNet fused after the fc6 dropout layer.

    Each input runs through its own top stream ('stream_1'/'stream_2'
    variable scopes); the activations are averaged and fed through one
    shared bottom network.
    """
    top_a = self.alexnet_stream_6(x_1, 'stream_1')
    top_b = self.alexnet_stream_6(x_2, 'stream_2')
    # Fuse by averaging the two stream activations.
    fuse_point = tf.scalar_mul(1.0 / 2.0, tf.add(top_a, top_b))
    return self.alexnet_bottom_6(fuse_point)
# Implementation of VGG net
def vgg_net(self, x):
    """Single-stream VGG-16 graph (13 conv + 3 FC layers).

    Args:
        x: flattened input batch; reshaped to [-1, 224, 224, 3].

    Returns:
        Logits of shape [batch, NUM_CLASSES] (no final activation).
    """
    # reshaping into 4d tensor
    x = tf.reshape(x, [-1, 224, 224, 3])
    # conv1_1 layer with relu
    conv1_1 = self.conv_layer_relu(x, [3, 3, 3, 64], [64], [1, 1, 1, 1], "vgg_conv1_1")
    # conv1_2 layer with relu
    conv1_2 = self.conv_layer_relu(conv1_1, [3, 3, 64, 64], [64], [1, 1, 1, 1], "vgg_conv1_2")
    # maxpool 1
    pool1 = self.maxpool(conv1_2, [1, 2, 2, 1], [1, 2, 2, 1], "vgg_pool1")
    # norm layer after pool1 (NOTE(review): LRN is not part of the
    # original VGG architecture — presumably intentional here)
    norm1 = tf.nn.local_response_normalization(pool1, name="vgg_norm1")
    # conv2_1 layer with relu
    conv2_1 = self.conv_layer_relu(norm1, [3, 3, 64, 128], [128], [1, 1, 1, 1], "vgg_conv2_1")
    # conv2_2 layer with relu
    conv2_2 = self.conv_layer_relu(conv2_1, [3, 3, 128, 128], [128], [1, 1, 1, 1], "vgg_conv2_2")
    # maxpool 2
    pool2 = self.maxpool(conv2_2, [1, 2, 2, 1], [1, 2, 2, 1], "vgg_pool2")
    # norm layer after pool2
    norm2 = tf.nn.local_response_normalization(pool2, name="vgg_norm2")
    # conv3_1 layer with relu
    conv3_1 = self.conv_layer_relu(norm2, [3, 3, 128, 256], [256], [1, 1, 1, 1], "vgg_conv3_1")
    # conv3_2 layer with relu
    conv3_2 = self.conv_layer_relu(conv3_1, [3, 3, 256, 256], [256], [1, 1, 1, 1], "vgg_conv3_2")
    # conv3_3 layer with relu
    conv3_3 = self.conv_layer_relu(conv3_2, [3, 3, 256, 256], [256], [1, 1, 1, 1], "vgg_conv3_3")
    # maxpool 3
    pool3 = self.maxpool(conv3_3, [1, 2, 2, 1], [1, 2, 2, 1], "vgg_pool3")
    # norm layer after pool3
    norm3 = tf.nn.local_response_normalization(pool3, name="vgg_norm3")
    # conv4_1 layer with relu
    conv4_1 = self.conv_layer_relu(norm3, [3, 3, 256, 512], [512], [1, 1, 1, 1], "vgg_conv4_1")
    # conv4_2 layer with relu
    conv4_2 = self.conv_layer_relu(conv4_1, [3, 3, 512, 512], [512], [1, 1, 1, 1], "vgg_conv4_2")
    # conv4_3 layer with relu
    conv4_3 = self.conv_layer_relu(conv4_2, [3, 3, 512, 512], [512], [1, 1, 1, 1], "vgg_conv4_3")
    # maxpool 4
    pool4 = self.maxpool(conv4_3, [1, 2, 2, 1], [1, 2, 2, 1], "vgg_pool4")
    # norm layer after pool4
    norm4 = tf.nn.local_response_normalization(pool4, name="vgg_norm4")
    # conv5_1 layer with relu
    conv5_1 = self.conv_layer_relu(norm4, [3, 3, 512, 512], [512], [1, 1, 1, 1], "vgg_conv5_1")
    # conv5_2 layer with relu
    conv5_2 = self.conv_layer_relu(conv5_1, [3, 3, 512, 512], [512], [1, 1, 1, 1], "vgg_conv5_2")
    # conv5_3 layer with relu
    conv5_3 = self.conv_layer_relu(conv5_2, [3, 3, 512, 512], [512], [1, 1, 1, 1], "vgg_conv5_3")
    # maxpool 5
    pool5 = self.maxpool(conv5_3, [1, 2, 2, 1], [1, 2, 2, 1], "vgg_pool5")
    # reshaping for fc layers (assumes 7x7x512 activations for 224x224
    # input — TODO confirm for other sizes)
    x2 = tf.reshape(pool5, [-1, 7 * 7 * 512])
    # fc6 with relu
    fc6 = self.fc_relu(x2, [7 * 7 * 512, 4096], [4096], "vgg_fc6")
    # dropout for fc6
    dropout_fc6 = self.dropout(fc6, "vgg_drop_fc6")
    # fc7 with relu
    fc7 = self.fc_relu(dropout_fc6, [4096, 4096], [4096], "vgg_fc7")
    # dropout for fc7
    dropout_fc7 = self.dropout(fc7, "vgg_drop_fc7")
    # fc8 or output WITHOUT acivation and dropout
    out = self.fc_relu(dropout_fc7, [4096, self.NUM_CLASSES], [self.NUM_CLASSES], "vgg_out", relu='FALSE')
    return out
def vggnet_stream_2(self, x, scope_name):
    """VGG top stream up to stage 2, built inside its own variable scope.

    Returns the LRN output after pool2 so two such streams can be fused.
    """
    with tf.variable_scope(scope_name):
        net = tf.reshape(x, [-1, 224, 224, 3])
        for stage, (cin, cout) in enumerate([(3, 64), (64, 128)], start=1):
            for sub in (1, 2):
                net = self.conv_layer_relu(
                    net, [3, 3, cin if sub == 1 else cout, cout], [cout],
                    [1, 1, 1, 1], "vgg_conv%d_%d" % (stage, sub))
            net = self.maxpool(net, [1, 2, 2, 1], [1, 2, 2, 1],
                               "vgg_pool%d" % stage)
            # LRN after each pooling layer in this top portion
            net = tf.nn.local_response_normalization(
                net, name="vgg_norm%d" % stage)
        return net
def vggnet_bottom_2(self, norm2):
    """Shared VGG bottom after a stage-2 fuse: stages 3-5 plus the FC head."""
    net = norm2
    for stage, (cin, cout) in enumerate(
            [(128, 256), (256, 512), (512, 512)], start=3):
        for sub in (1, 2, 3):
            net = self.conv_layer_relu(
                net, [3, 3, cin if sub == 1 else cout, cout], [cout],
                [1, 1, 1, 1], "vgg_conv%d_%d" % (stage, sub))
        net = self.maxpool(net, [1, 2, 2, 1], [1, 2, 2, 1],
                           "vgg_pool%d" % stage)
        if stage < 5:
            # no LRN after the final pooling layer
            net = tf.nn.local_response_normalization(
                net, name="vgg_norm%d" % stage)
    # fully connected head
    net = tf.reshape(net, [-1, 7 * 7 * 512])
    net = self.fc_relu(net, [7 * 7 * 512, 4096], [4096], "vgg_fc6")
    net = self.dropout(net, "vgg_drop_fc6")
    net = self.fc_relu(net, [4096, 4096], [4096], "vgg_fc7")
    net = self.dropout(net, "vgg_drop_fc7")
    # output layer without activation (relu='FALSE')
    return self.fc_relu(net, [4096, self.NUM_CLASSES], [self.NUM_CLASSES],
                        "vgg_out", relu='FALSE')
def vggnet_fused2(self, x_1, x_2):
    """Two-stream VGG fused by element-wise averaging after stage 2."""
    top_a = self.vggnet_stream_2(x_1, 'stream_1')  # first top stream
    top_b = self.vggnet_stream_2(x_2, 'stream_2')  # second top stream
    # average the two activation maps at the fuse point
    fused = tf.scalar_mul(0.5, tf.add(top_a, top_b))
    # continue through the shared bottom of the network
    return self.vggnet_bottom_2(fused)
def vggnet_stream_3(self, x, scope_name):
    """VGG top stream up to stage 3, built inside its own variable scope.

    Returns the LRN output after pool3 (the fuse point).
    """
    with tf.variable_scope(scope_name):
        net = tf.reshape(x, [-1, 224, 224, 3])
        # (input channels, output channels, #convs) for stages 1..3
        for stage, (cin, cout, reps) in enumerate(
                [(3, 64, 2), (64, 128, 2), (128, 256, 3)], start=1):
            for sub in range(1, reps + 1):
                net = self.conv_layer_relu(
                    net, [3, 3, cin if sub == 1 else cout, cout], [cout],
                    [1, 1, 1, 1], "vgg_conv%d_%d" % (stage, sub))
            net = self.maxpool(net, [1, 2, 2, 1], [1, 2, 2, 1],
                               "vgg_pool%d" % stage)
            # LRN after each pooling layer in this top portion
            net = tf.nn.local_response_normalization(
                net, name="vgg_norm%d" % stage)
        return net
def vggnet_bottom_3(self, norm3):
    """Shared VGG bottom after a stage-3 fuse: stages 4-5 plus the FC head."""
    net = norm3
    for stage, cin in ((4, 256), (5, 512)):
        for sub in (1, 2, 3):
            net = self.conv_layer_relu(
                net, [3, 3, cin if sub == 1 else 512, 512], [512],
                [1, 1, 1, 1], "vgg_conv%d_%d" % (stage, sub))
        net = self.maxpool(net, [1, 2, 2, 1], [1, 2, 2, 1],
                           "vgg_pool%d" % stage)
        if stage == 4:
            # LRN after pool4 only; pool5 feeds the FC head directly
            net = tf.nn.local_response_normalization(net, name="vgg_norm4")
    # fully connected head
    net = tf.reshape(net, [-1, 7 * 7 * 512])
    net = self.fc_relu(net, [7 * 7 * 512, 4096], [4096], "vgg_fc6")
    net = self.dropout(net, "vgg_drop_fc6")
    net = self.fc_relu(net, [4096, 4096], [4096], "vgg_fc7")
    net = self.dropout(net, "vgg_drop_fc7")
    # output layer without activation (relu='FALSE')
    return self.fc_relu(net, [4096, self.NUM_CLASSES], [self.NUM_CLASSES],
                        "vgg_out", relu='FALSE')
def vggnet_fused3(self, x_1, x_2):
    """Two-stream VGG fused by element-wise averaging after stage 3."""
    top_a = self.vggnet_stream_3(x_1, 'stream_1')  # first top stream
    top_b = self.vggnet_stream_3(x_2, 'stream_2')  # second top stream
    # average the two activation maps at the fuse point
    fused = tf.scalar_mul(0.5, tf.add(top_a, top_b))
    # continue through the shared bottom of the network
    return self.vggnet_bottom_3(fused)
def vggnet_stream_4(self, x, scope_name):
    """VGG top stream up to stage 4, built inside its own variable scope.

    Returns the LRN output after pool4 (the fuse point).
    """
    with tf.variable_scope(scope_name):
        net = tf.reshape(x, [-1, 224, 224, 3])
        # (input channels, output channels, #convs) for stages 1..4
        cfg = [(3, 64, 2), (64, 128, 2), (128, 256, 3), (256, 512, 3)]
        for stage, (cin, cout, reps) in enumerate(cfg, start=1):
            for sub in range(1, reps + 1):
                net = self.conv_layer_relu(
                    net, [3, 3, cin if sub == 1 else cout, cout], [cout],
                    [1, 1, 1, 1], "vgg_conv%d_%d" % (stage, sub))
            net = self.maxpool(net, [1, 2, 2, 1], [1, 2, 2, 1],
                               "vgg_pool%d" % stage)
            # LRN after each pooling layer in this top portion
            net = tf.nn.local_response_normalization(
                net, name="vgg_norm%d" % stage)
        return net
def vggnet_bottom_4(self, norm4):
    """Shared VGG bottom after a stage-4 fuse: stage 5 plus the FC head."""
    net = norm4
    # stage 5: three 512->512 convolutions followed by pooling (no LRN)
    for sub in (1, 2, 3):
        net = self.conv_layer_relu(net, [3, 3, 512, 512], [512],
                                   [1, 1, 1, 1], "vgg_conv5_%d" % sub)
    net = self.maxpool(net, [1, 2, 2, 1], [1, 2, 2, 1], "vgg_pool5")
    # fully connected head
    net = tf.reshape(net, [-1, 7 * 7 * 512])
    net = self.fc_relu(net, [7 * 7 * 512, 4096], [4096], "vgg_fc6")
    net = self.dropout(net, "vgg_drop_fc6")
    net = self.fc_relu(net, [4096, 4096], [4096], "vgg_fc7")
    net = self.dropout(net, "vgg_drop_fc7")
    # output layer without activation (relu='FALSE')
    return self.fc_relu(net, [4096, self.NUM_CLASSES], [self.NUM_CLASSES],
                        "vgg_out", relu='FALSE')
def vggnet_fused4(self, x_1, x_2):
    """Two-stream VGG fused by element-wise averaging after stage 4."""
    top_a = self.vggnet_stream_4(x_1, 'stream_1')  # first top stream
    top_b = self.vggnet_stream_4(x_2, 'stream_2')  # second top stream
    # average the two activation maps at the fuse point
    fused = tf.scalar_mul(0.5, tf.add(top_a, top_b))
    # continue through the shared bottom of the network
    return self.vggnet_bottom_4(fused)
def vggnet_stream_5(self, x, scope_name):
    """Full VGG conv trunk (stages 1-5) inside its own variable scope.

    Returns the pool5 output (fuse point just before the FC head).
    """
    with tf.variable_scope(scope_name):
        net = tf.reshape(x, [-1, 224, 224, 3])
        # (input channels, output channels, #convs) for stages 1..5
        cfg = [(3, 64, 2), (64, 128, 2), (128, 256, 3),
               (256, 512, 3), (512, 512, 3)]
        for stage, (cin, cout, reps) in enumerate(cfg, start=1):
            for sub in range(1, reps + 1):
                net = self.conv_layer_relu(
                    net, [3, 3, cin if sub == 1 else cout, cout], [cout],
                    [1, 1, 1, 1], "vgg_conv%d_%d" % (stage, sub))
            net = self.maxpool(net, [1, 2, 2, 1], [1, 2, 2, 1],
                               "vgg_pool%d" % stage)
            if stage < 5:
                # LRN follows every pooling layer except the last one
                net = tf.nn.local_response_normalization(
                    net, name="vgg_norm%d" % stage)
        return net
def vggnet_bottom_5(self, pool5):
    """Shared VGG FC head after a pool5-level fuse: fc6/fc7 + output."""
    net = tf.reshape(pool5, [-1, 7 * 7 * 512])
    net = self.fc_relu(net, [7 * 7 * 512, 4096], [4096], "vgg_fc6")
    net = self.dropout(net, "vgg_drop_fc6")
    net = self.fc_relu(net, [4096, 4096], [4096], "vgg_fc7")
    net = self.dropout(net, "vgg_drop_fc7")
    # output layer without activation (relu='FALSE')
    return self.fc_relu(net, [4096, self.NUM_CLASSES], [self.NUM_CLASSES],
                        "vgg_out", relu='FALSE')
def vggnet_fused5(self, x_1, x_2):
    """Two-stream VGG fused by element-wise averaging after pool5."""
    top_a = self.vggnet_stream_5(x_1, 'stream_1')  # first top stream
    top_b = self.vggnet_stream_5(x_2, 'stream_2')  # second top stream
    # average the two activation maps at the fuse point
    fused = tf.scalar_mul(0.5, tf.add(top_a, top_b))
    # continue through the shared FC head
    return self.vggnet_bottom_5(fused)
def vggnet_stream_6(self, x, scope_name):
    """VGG trunk plus fc6 (with dropout) inside its own variable scope.

    Returns the dropped-out fc6 activations (fuse point before fc7).
    """
    with tf.variable_scope(scope_name):
        net = tf.reshape(x, [-1, 224, 224, 3])
        # (input channels, output channels, #convs) for stages 1..5
        cfg = [(3, 64, 2), (64, 128, 2), (128, 256, 3),
               (256, 512, 3), (512, 512, 3)]
        for stage, (cin, cout, reps) in enumerate(cfg, start=1):
            for sub in range(1, reps + 1):
                net = self.conv_layer_relu(
                    net, [3, 3, cin if sub == 1 else cout, cout], [cout],
                    [1, 1, 1, 1], "vgg_conv%d_%d" % (stage, sub))
            net = self.maxpool(net, [1, 2, 2, 1], [1, 2, 2, 1],
                               "vgg_pool%d" % stage)
            if stage < 5:
                # LRN follows every pooling layer except the last one
                net = tf.nn.local_response_normalization(
                    net, name="vgg_norm%d" % stage)
        # first FC layer + dropout, still inside the per-stream scope
        net = tf.reshape(net, [-1, 7 * 7 * 512])
        net = self.fc_relu(net, [7 * 7 * 512, 4096], [4096], "vgg_fc6")
        return self.dropout(net, "vgg_drop_fc6")
def vggnet_bottom_6(self, dropout_fc6):
    """Shared VGG tail after an fc6-level fuse: fc7 + dropout + output."""
    net = self.fc_relu(dropout_fc6, [4096, 4096], [4096], "vgg_fc7")
    net = self.dropout(net, "vgg_drop_fc7")
    # output layer without activation (relu='FALSE')
    return self.fc_relu(net, [4096, self.NUM_CLASSES], [self.NUM_CLASSES],
                        "vgg_out", relu='FALSE')
def vggnet_fused6(self, x_1, x_2):
    """Two-stream VGG fused by element-wise averaging after fc6 dropout."""
    top_a = self.vggnet_stream_6(x_1, 'stream_1')  # first top stream
    top_b = self.vggnet_stream_6(x_2, 'stream_2')  # second top stream
    # average the two fc6 activation vectors at the fuse point
    fused = tf.scalar_mul(0.5, tf.add(top_a, top_b))
    # continue through the shared tail of the network
    return self.vggnet_bottom_6(fused)
| 38.979499
| 107
| 0.680926
| 6,207
| 34,224
| 3.54213
| 0.022716
| 0.032202
| 0.027836
| 0.083508
| 0.962567
| 0.957473
| 0.953652
| 0.953652
| 0.950923
| 0.935777
| 0
| 0.138051
| 0.153167
| 34,224
| 878
| 108
| 38.979499
| 0.620558
| 0.191006
| 0
| 0.793911
| 0
| 0
| 0.102566
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086651
| false
| 0
| 0.004684
| 0.004684
| 0.177986
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6caf2672b0e3927be1c1d07625f7c065c20ab07c
| 100
|
py
|
Python
|
tests/conftest.py
|
hoogamaphone/chromathicity
|
cc882f69ee3e571662be89b2d8ff5a1b34768414
|
[
"BSD-3-Clause"
] | null | null | null |
tests/conftest.py
|
hoogamaphone/chromathicity
|
cc882f69ee3e571662be89b2d8ff5a1b34768414
|
[
"BSD-3-Clause"
] | 2
|
2017-06-29T18:34:34.000Z
|
2017-06-29T18:35:50.000Z
|
tests/conftest.py
|
hoogamaphone/chromathicity
|
cc882f69ee3e571662be89b2d8ff5a1b34768414
|
[
"BSD-3-Clause"
] | null | null | null |
from numpy import set_printoptions
def pytest_runtest_setup():
    """pytest hook: before every test, print numpy arrays with 10 digits."""
    set_printoptions(precision=10)
| 16.666667
| 34
| 0.81
| 13
| 100
| 5.923077
| 0.846154
| 0.38961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022989
| 0.13
| 100
| 5
| 35
| 20
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 8
|
9f029b0dd52c36de778b26f95bf5cd987740c393
| 243
|
py
|
Python
|
pyscf/prop/ssc/__init__.py
|
nmardirossian/pyscf
|
57c8912dcfcc1157a822feede63df54ed1067115
|
[
"BSD-2-Clause"
] | 1
|
2018-05-02T19:55:30.000Z
|
2018-05-02T19:55:30.000Z
|
pyscf/prop/ssc/__init__.py
|
nmardirossian/pyscf
|
57c8912dcfcc1157a822feede63df54ed1067115
|
[
"BSD-2-Clause"
] | null | null | null |
pyscf/prop/ssc/__init__.py
|
nmardirossian/pyscf
|
57c8912dcfcc1157a822feede63df54ed1067115
|
[
"BSD-2-Clause"
] | 1
|
2018-12-06T03:10:50.000Z
|
2018-12-06T03:10:50.000Z
|
#!/usr/bin/env python
from pyscf.prop.ssc import rhf
from pyscf.prop.ssc import uhf
from pyscf.prop.ssc import dhf
from pyscf.prop.ssc.rhf import SSC as RHF
from pyscf.prop.ssc.uhf import SSC as UHF
from pyscf.prop.ssc.dhf import SSC as DHF
| 24.3
| 41
| 0.777778
| 49
| 243
| 3.857143
| 0.265306
| 0.285714
| 0.412698
| 0.507937
| 0.582011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139918
| 243
| 9
| 42
| 27
| 0.904306
| 0.082305
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9f2492453775182013994e3660350fc4889b3a60
| 6,769
|
py
|
Python
|
benchmarks/T/CopyValueBHE/CopyValue.py
|
GeoStat-Framework/ogs5py_benchmarks
|
0b6db19b87cfad36459757f99ce2458f8e12b20b
|
[
"BSD-4-Clause"
] | 3
|
2019-01-15T17:38:11.000Z
|
2020-01-07T23:44:12.000Z
|
benchmarks/T/CopyValueBHE/CopyValue.py
|
GeoStat-Framework/ogs5py_benchmarks
|
0b6db19b87cfad36459757f99ce2458f8e12b20b
|
[
"BSD-4-Clause"
] | 1
|
2020-05-12T09:18:09.000Z
|
2020-05-12T10:48:32.000Z
|
benchmarks/T/CopyValueBHE/CopyValue.py
|
GeoStat-Framework/ogs5py_benchmarks
|
0b6db19b87cfad36459757f99ce2458f8e12b20b
|
[
"BSD-4-Clause"
] | 1
|
2020-01-08T13:28:50.000Z
|
2020-01-08T13:28:50.000Z
|
# -*- coding: utf-8 -*-
"""ogs5py benchmark 'CopyValue': coupled LIQUID_FLOW + HEAT_TRANSPORT model.

Builds every OGS5 input file for the benchmark and runs the solver.  The
distinguishing feature is the COPY_VALUE boundary condition: the temperature
on polyline BC_LOW_RIGHT is copied from point P07 at run time.
"""
from ogs5py import OGS

# model container -- input files go to 'CopyValue_root', results to 'out'
model = OGS(
    task_root='CopyValue_root',
    task_id='CopyValue',
    output_dir='out',
)
# mesh and geometry are read from pre-built benchmark files
model.msh.read_file('CopyValue.msh')
model.gli.read_file('CopyValue.gli')
# processes: liquid flow (with a deactivated subdomain) and heat transport
model.pcs.add_block(
    main_key='PROCESS',
    PCS_TYPE='LIQUID_FLOW',
    NUM_TYPE='NEW',
    BOUNDARY_CONDITION_OUTPUT=[],
    DEACTIVATED_SUBDOMAIN=[
        [1],
        [1],
    ],
)
model.pcs.add_block(
    main_key='PROCESS',
    PCS_TYPE='HEAT_TRANSPORT',
    NUM_TYPE='NEW',
    BOUNDARY_CONDITION_OUTPUT=[],
)
# pressure boundary conditions: fixed value at points and lower polylines
model.bc.add_block(
    main_key='BOUNDARY_CONDITION',
    PCS_TYPE='LIQUID_FLOW',
    PRIMARY_VARIABLE='PRESSURE1',
    GEO_TYPE=['POINT', 'P00'],
    DIS_TYPE=['CONSTANT', 109810.0],
)
model.bc.add_block(
    main_key='BOUNDARY_CONDITION',
    PCS_TYPE='LIQUID_FLOW',
    PRIMARY_VARIABLE='PRESSURE1',
    GEO_TYPE=['POINT', 'P01'],
    DIS_TYPE=['CONSTANT', 109810.0],
)
model.bc.add_block(
    main_key='BOUNDARY_CONDITION',
    PCS_TYPE='LIQUID_FLOW',
    PRIMARY_VARIABLE='PRESSURE1',
    GEO_TYPE=['POINT', 'P02'],
    DIS_TYPE=['CONSTANT', 109810.0],
)
model.bc.add_block(
    main_key='BOUNDARY_CONDITION',
    PCS_TYPE='LIQUID_FLOW',
    PRIMARY_VARIABLE='PRESSURE1',
    GEO_TYPE=['POLYLINE', 'BC_LOW_LEFT'],
    DIS_TYPE=['CONSTANT', 109810.0],
)
model.bc.add_block(
    main_key='BOUNDARY_CONDITION',
    PCS_TYPE='LIQUID_FLOW',
    PRIMARY_VARIABLE='PRESSURE1',
    GEO_TYPE=['POLYLINE', 'BC_LOW_RIGHT'],
    DIS_TYPE=['CONSTANT', 109810.0],
)
# temperature boundary conditions
model.bc.add_block(
    main_key='BOUNDARY_CONDITION',
    PCS_TYPE='HEAT_TRANSPORT',
    PRIMARY_VARIABLE='TEMPERATURE1',
    GEO_TYPE=['POINT', 'P00'],
    DIS_TYPE=['CONSTANT', 283],
)
model.bc.add_block(
    main_key='BOUNDARY_CONDITION',
    PCS_TYPE='HEAT_TRANSPORT',
    PRIMARY_VARIABLE='TEMPERATURE1',
    GEO_TYPE=['POINT', 'P02'],
    DIS_TYPE=['CONSTANT', 283],
)
model.bc.add_block(
    main_key='BOUNDARY_CONDITION',
    PCS_TYPE='HEAT_TRANSPORT',
    PRIMARY_VARIABLE='TEMPERATURE1',
    GEO_TYPE=['POLYLINE', 'BC_UP_LEFT'],
    DIS_TYPE=['CONSTANT', 303],
)
# COPY_VALUE: temperature on BC_LOW_RIGHT mirrors the value at point P07
model.bc.add_block(
    main_key='BOUNDARY_CONDITION',
    PCS_TYPE='HEAT_TRANSPORT',
    PRIMARY_VARIABLE='TEMPERATURE1',
    GEO_TYPE=['POLYLINE', 'BC_LOW_RIGHT'],
    DIS_TYPE=['CONSTANT', 1],
    COPY_VALUE=['POINT', 'P07'],
)
# initial conditions over the whole domain
model.ic.add_block(
    main_key='INITIAL_CONDITION',
    PCS_TYPE='LIQUID_FLOW',
    PRIMARY_VARIABLE='PRESSURE1',
    GEO_TYPE='DOMAIN',
    DIS_TYPE=['CONSTANT', 100000.0],
)
model.ic.add_block(
    main_key='INITIAL_CONDITION',
    PCS_TYPE='HEAT_TRANSPORT',
    PRIMARY_VARIABLE='TEMPERATURE1',
    GEO_TYPE='DOMAIN',
    DIS_TYPE=['CONSTANT', 283.0],
)
# source terms: equal and opposite Neumann fluxes on the upper polylines
model.st.add_block(
    main_key='SOURCE_TERM',
    PCS_TYPE='LIQUID_FLOW',
    PRIMARY_VARIABLE='PRESSURE1',
    GEO_TYPE=['POLYLINE', 'BC_UP_LEFT'],
    DIS_TYPE=['CONSTANT_NEUMANN', 0.00312],
)
model.st.add_block(
    main_key='SOURCE_TERM',
    PCS_TYPE='LIQUID_FLOW',
    PRIMARY_VARIABLE='PRESSURE1',
    GEO_TYPE=['POLYLINE', 'BC_UP_RIGHT'],
    DIS_TYPE=['CONSTANT_NEUMANN', -0.00312],
)
# medium properties, one block per material subdomain (they differ in
# permeability, density and heat dispersion)
model.mmp.add_block(
    main_key='MEDIUM_PROPERTIES',
    GEOMETRY_DIMENSION=2,
    GEOMETRY_AREA=1.0,
    POROSITY=[1, 0.25],
    TORTUOSITY=[1, 1.0],
    STORAGE=[1, 0.0],
    PERMEABILITY_TENSOR=['ISOTROPIC', 2e-07],
    DENSITY=[1, 1000],
    HEAT_DISPERSION=[1, 0.01, 0.01],
)
model.mmp.add_block(
    main_key='MEDIUM_PROPERTIES',
    GEOMETRY_DIMENSION=2,
    GEOMETRY_AREA=1.0,
    POROSITY=[1, 0.25],
    TORTUOSITY=[1, 1.0],
    STORAGE=[1, 0.0],
    PERMEABILITY_TENSOR=['ISOTROPIC', 2e-14],
    DENSITY=[1, 940],
    HEAT_DISPERSION=[1, 0.001, 0.001],
)
model.mmp.add_block(
    main_key='MEDIUM_PROPERTIES',
    GEOMETRY_DIMENSION=2,
    GEOMETRY_AREA=1.0,
    POROSITY=[1, 0.25],
    TORTUOSITY=[1, 1.0],
    STORAGE=[1, 0.0],
    PERMEABILITY_TENSOR=['ISOTROPIC', 2e-14],
    DENSITY=[1, 1980],
    HEAT_DISPERSION=[1, 0.001, 0.001],
)
model.mmp.add_block(
    main_key='MEDIUM_PROPERTIES',
    GEOMETRY_DIMENSION=2,
    GEOMETRY_AREA=1.0,
    POROSITY=[1, 0.25],
    TORTUOSITY=[1, 1.0],
    STORAGE=[1, 0.0],
    PERMEABILITY_TENSOR=['ISOTROPIC', 2e-12],
    DENSITY=[1, 2100],
    HEAT_DISPERSION=[1, 0.1, 0.1],
)
# solid properties per subdomain: density + thermal expansion/capacity/conductivity
model.msp.add_block(
    main_key='SOLID_PROPERTIES',
    DENSITY=[1, 999.7],
    THERMAL=[
        ['EXPANSION', 0.0],
        ['CAPACITY'],
        [1, 4185],
        ['CONDUCTIVITY'],
        [1, 0.56],
    ],
)
model.msp.add_block(
    main_key='SOLID_PROPERTIES',
    DENSITY=[1, 950],
    THERMAL=[
        ['EXPANSION', 0.0],
        ['CAPACITY'],
        [1, 1700],
        ['CONDUCTIVITY'],
        [1, 0.42],
    ],
)
model.msp.add_block(
    main_key='SOLID_PROPERTIES',
    DENSITY=[1, 2650],
    THERMAL=[
        ['EXPANSION', 0.0],
        ['CAPACITY'],
        [1, 1466],
        ['CONDUCTIVITY'],
        [1, 2.08],
    ],
)
model.msp.add_block(
    main_key='SOLID_PROPERTIES',
    DENSITY=[1, 2650],
    THERMAL=[
        ['EXPANSION', 0.0],
        ['CAPACITY'],
        [1, 784],
        ['CONDUCTIVITY'],
        [1, 2.86],
    ],
)
# fluid properties (water-like liquid)
model.mfp.add_block(
    main_key='FLUID_PROPERTIES',
    FLUID_TYPE='LIQUID',
    DENSITY=[1, 1000.0],
    VISCOSITY=[1, 0.001],
    SPECIFIC_HEAT_CAPACITY=[1, 4185],
    HEAT_CONDUCTIVITY=[1, 0.56],
)
# numerics: 3 Gauss points, iterative linear solver settings per process
model.num.add_block(
    main_key='NUMERICS',
    PCS_TYPE='LIQUID_FLOW',
    ELE_GAUSS_POINTS=3,
    LINEAR_SOLVER=[2, 6, 1e-10, 1000, 1.0, 1, 4],
)
model.num.add_block(
    main_key='NUMERICS',
    PCS_TYPE='HEAT_TRANSPORT',
    ELE_GAUSS_POINTS=3,
    LINEAR_SOLVER=[2, 6, 1e-10, 2000, 1.0, 1, 4],
)
# time stepping for both processes
# NOTE(review): TIME_STEPS=[100, 10] presumably means 100 steps of size 10
# per the ogs5py convention -- verify against the ogs5py docs
model.tim.add_block(
    main_key='TIME_STEPPING',
    PCS_TYPE='LIQUID_FLOW',
    TIME_STEPS=[100, 10],
    TIME_END=3153600000,
    TIME_START=0.0,
)
model.tim.add_block(
    main_key='TIME_STEPPING',
    PCS_TYPE='HEAT_TRANSPORT',
    TIME_STEPS=[100, 10],
    TIME_END=3153600000,
    TIME_START=0.0,
)
# output: full domain plus observation points P12 and P13, TECPLOT every step
model.out.add_block(
    main_key='OUTPUT',
    NOD_VALUES=[
        ['PRESSURE1'],
        ['HEAD'],
        ['VELOCITY_X1'],
        ['VELOCITY_Y1'],
        ['VELOCITY_Z1'],
        ['TEMPERATURE1'],
    ],
    GEO_TYPE='DOMAIN',
    DAT_TYPE='TECPLOT',
    TIM_TYPE=['STEPS', 1],
)
model.out.add_block(
    main_key='OUTPUT',
    NOD_VALUES=[
        ['PRESSURE1'],
        ['HEAD'],
        ['VELOCITY_X1'],
        ['VELOCITY_Y1'],
        ['VELOCITY_Z1'],
        ['TEMPERATURE1'],
    ],
    GEO_TYPE=['POINT', 'P12'],
    DAT_TYPE='TECPLOT',
    TIM_TYPE=['STEPS', 1],
)
model.out.add_block(
    main_key='OUTPUT',
    NOD_VALUES=[
        ['PRESSURE1'],
        ['HEAD'],
        ['VELOCITY_X1'],
        ['VELOCITY_Y1'],
        ['VELOCITY_Z1'],
        ['TEMPERATURE1'],
    ],
    GEO_TYPE=['POINT', 'P13'],
    DAT_TYPE='TECPLOT',
    TIM_TYPE=['STEPS', 1],
)
# write all OGS5 input files and launch the solver
model.write_input()
model.run_model()
| 23.750877
| 49
| 0.614566
| 869
| 6,769
| 4.504028
| 0.174914
| 0.063362
| 0.095043
| 0.118804
| 0.853602
| 0.843383
| 0.801993
| 0.788452
| 0.780276
| 0.724578
| 0
| 0.069993
| 0.206382
| 6,769
| 284
| 50
| 23.834507
| 0.6586
| 0.003102
| 0
| 0.712766
| 0
| 0
| 0.229321
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.003546
| 0
| 0.003546
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9f64efbc80ff170a46bbf3baefc2f6ce3f1cbb4b
| 49
|
py
|
Python
|
coco_lm_pytorch/__init__.py
|
lucidrains/coco-lm-pytorch
|
516c1783b9f8ec6c27bab80d3aa8521d217a18cb
|
[
"MIT"
] | 40
|
2021-03-02T18:06:46.000Z
|
2021-12-26T15:31:51.000Z
|
coco_lm_pytorch/__init__.py
|
lucidrains/coco-lm-pytorch
|
516c1783b9f8ec6c27bab80d3aa8521d217a18cb
|
[
"MIT"
] | 2
|
2021-03-19T23:01:45.000Z
|
2021-12-17T00:44:20.000Z
|
coco_lm_pytorch/__init__.py
|
lucidrains/coco-lm-pytorch
|
516c1783b9f8ec6c27bab80d3aa8521d217a18cb
|
[
"MIT"
] | 3
|
2021-03-19T23:32:56.000Z
|
2021-12-26T12:57:59.000Z
|
from coco_lm_pytorch.coco_lm_pytorch import COCO
| 24.5
| 48
| 0.897959
| 9
| 49
| 4.444444
| 0.555556
| 0.3
| 0.65
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 49
| 1
| 49
| 49
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
9810c55d4bc13e8b442c5475bb07a7da5f6ec7ed
| 7,077
|
py
|
Python
|
tests/test_formatter/snapshots/snap_test_formatter_output.py
|
toennifer/wemake-python-styleguide
|
12f942035aec4a34d38e24df89b150b88f35e021
|
[
"MIT"
] | 1
|
2021-04-10T18:14:51.000Z
|
2021-04-10T18:14:51.000Z
|
tests/test_formatter/snapshots/snap_test_formatter_output.py
|
amansr02/wemake-python-styleguide
|
681035ed21fbe28ebfb32b8807b98e8de76b64aa
|
[
"MIT"
] | 1
|
2020-12-17T19:38:28.000Z
|
2020-12-17T19:38:28.000Z
|
tests/test_formatter/snapshots/snap_test_formatter_output.py
|
amansr02/wemake-python-styleguide
|
681035ed21fbe28ebfb32b8807b98e8de76b64aa
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_formatter[cli_options0-regular] formatter_regular'] = '''
./tests/fixtures/formatter/formatter1.py
1:1 WPS111 Found too short name: s < 2
1:7 WPS110 Found wrong variable name: handle
2:21 WPS432 Found magic number: 200
2:21 WPS303 Found underscored number: 2_00
./tests/fixtures/formatter/formatter2.py
1:1 WPS110 Found wrong variable name: data
1:10 WPS110 Found wrong variable name: param
2:12 WPS437 Found protected attribute usage: _protected
2:31 WPS303 Found underscored number: 10_00
Full list of violations and explanations:
https://wemake-python-stylegui.de/en/xx.xx/pages/usage/violations/
'''
snapshots['test_formatter[cli_options1-regular_statistic] formatter_regular_statistic'] = '''
./tests/fixtures/formatter/formatter1.py
1:1 WPS111 Found too short name: s < 2
1:7 WPS110 Found wrong variable name: handle
2:21 WPS432 Found magic number: 200
2:21 WPS303 Found underscored number: 2_00
./tests/fixtures/formatter/formatter2.py
1:1 WPS110 Found wrong variable name: data
1:10 WPS110 Found wrong variable name: param
2:12 WPS437 Found protected attribute usage: _protected
2:31 WPS303 Found underscored number: 10_00
WPS110: Found wrong variable name: handle
1 ./tests/fixtures/formatter/formatter1.py
2 ./tests/fixtures/formatter/formatter2.py
Total: 3
WPS111: Found too short name: s < 2
1 ./tests/fixtures/formatter/formatter1.py
Total: 1
WPS303: Found underscored number: 2_00
1 ./tests/fixtures/formatter/formatter1.py
1 ./tests/fixtures/formatter/formatter2.py
Total: 2
WPS432: Found magic number: 200
1 ./tests/fixtures/formatter/formatter1.py
Total: 1
WPS437: Found protected attribute usage: _protected
1 ./tests/fixtures/formatter/formatter2.py
Total: 1
All errors: 8
Full list of violations and explanations:
https://wemake-python-stylegui.de/en/xx.xx/pages/usage/violations/
'''
snapshots['test_formatter[cli_options2-with_source] formatter_with_source'] = '''
./tests/fixtures/formatter/formatter1.py
1:1 WPS111 Found too short name: s < 2
def s(handle: int) -> int:
^
1:7 WPS110 Found wrong variable name: handle
def s(handle: int) -> int:
^
2:21 WPS432 Found magic number: 200
return handle + 2_00
^
2:21 WPS303 Found underscored number: 2_00
return handle + 2_00
^
./tests/fixtures/formatter/formatter2.py
1:1 WPS110 Found wrong variable name: data
def data(param) -> int:
^
1:10 WPS110 Found wrong variable name: param
def data(param) -> int:
^
2:12 WPS437 Found protected attribute usage: _protected
return param._protected + 10_00
^
2:31 WPS303 Found underscored number: 10_00
return param._protected + 10_00
^
Full list of violations and explanations:
https://wemake-python-stylegui.de/en/xx.xx/pages/usage/violations/
'''
snapshots['test_formatter[cli_options3-with_source_statistic] formatter_with_source_statistic'] = '''
./tests/fixtures/formatter/formatter1.py
1:1 WPS111 Found too short name: s < 2
def s(handle: int) -> int:
^
1:7 WPS110 Found wrong variable name: handle
def s(handle: int) -> int:
^
2:21 WPS432 Found magic number: 200
return handle + 2_00
^
2:21 WPS303 Found underscored number: 2_00
return handle + 2_00
^
./tests/fixtures/formatter/formatter2.py
1:1 WPS110 Found wrong variable name: data
def data(param) -> int:
^
1:10 WPS110 Found wrong variable name: param
def data(param) -> int:
^
2:12 WPS437 Found protected attribute usage: _protected
return param._protected + 10_00
^
2:31 WPS303 Found underscored number: 10_00
return param._protected + 10_00
^
WPS110: Found wrong variable name: handle
1 ./tests/fixtures/formatter/formatter1.py
2 ./tests/fixtures/formatter/formatter2.py
Total: 3
WPS111: Found too short name: s < 2
1 ./tests/fixtures/formatter/formatter1.py
Total: 1
WPS303: Found underscored number: 2_00
1 ./tests/fixtures/formatter/formatter1.py
1 ./tests/fixtures/formatter/formatter2.py
Total: 2
WPS432: Found magic number: 200
1 ./tests/fixtures/formatter/formatter1.py
Total: 1
WPS437: Found protected attribute usage: _protected
1 ./tests/fixtures/formatter/formatter2.py
Total: 1
All errors: 8
Full list of violations and explanations:
https://wemake-python-stylegui.de/en/xx.xx/pages/usage/violations/
'''
snapshots['test_formatter[cli_options4-statistic_with_source] formatter_statistic_with_source'] = '''
./tests/fixtures/formatter/formatter1.py
1:1 WPS111 Found too short name: s < 2
def s(handle: int) -> int:
^
1:7 WPS110 Found wrong variable name: handle
def s(handle: int) -> int:
^
2:21 WPS432 Found magic number: 200
return handle + 2_00
^
2:21 WPS303 Found underscored number: 2_00
return handle + 2_00
^
./tests/fixtures/formatter/formatter2.py
1:1 WPS110 Found wrong variable name: data
def data(param) -> int:
^
1:10 WPS110 Found wrong variable name: param
def data(param) -> int:
^
2:12 WPS437 Found protected attribute usage: _protected
return param._protected + 10_00
^
2:31 WPS303 Found underscored number: 10_00
return param._protected + 10_00
^
WPS110: Found wrong variable name: handle
1 ./tests/fixtures/formatter/formatter1.py
2 ./tests/fixtures/formatter/formatter2.py
Total: 3
WPS111: Found too short name: s < 2
1 ./tests/fixtures/formatter/formatter1.py
Total: 1
WPS303: Found underscored number: 2_00
1 ./tests/fixtures/formatter/formatter1.py
1 ./tests/fixtures/formatter/formatter2.py
Total: 2
WPS432: Found magic number: 200
1 ./tests/fixtures/formatter/formatter1.py
Total: 1
WPS437: Found protected attribute usage: _protected
1 ./tests/fixtures/formatter/formatter2.py
Total: 1
All errors: 8
Full list of violations and explanations:
https://wemake-python-stylegui.de/en/xx.xx/pages/usage/violations/
'''
snapshots['test_formatter_correct[cli_options0-regular] formatter_correct_regular'] = ''
snapshots['test_formatter_correct[cli_options1-regular_statistic] formatter_correct_regular_statistic'] = '''
All errors: 0
'''
snapshots['test_formatter_correct[cli_options2-with_source] formatter_correct_with_source'] = ''
snapshots['test_formatter_correct[cli_options3-with_source_statistic] formatter_correct_with_source_statistic'] = '''
All errors: 0
'''
snapshots['test_formatter_correct[cli_options4-statistic_with_source] formatter_correct_statistic_with_source'] = '''
All errors: 0
'''
| 27.219231
| 117
| 0.697895
| 961
| 7,077
| 5.027055
| 0.08845
| 0.08342
| 0.141172
| 0.089422
| 0.933347
| 0.893811
| 0.863382
| 0.863382
| 0.863382
| 0.842269
| 0
| 0.084734
| 0.209552
| 7,077
| 259
| 118
| 27.324324
| 0.77887
| 0.008761
| 0
| 0.801075
| 0
| 0.026882
| 0.952367
| 0.285368
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010753
| 0
| 0.075269
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e226cce8400f7adaf8fa2bc9c6e39c70d0e45a17
| 6,215
|
py
|
Python
|
gemtown/utils/abi.py
|
doramong0926/gemtown
|
2c39284e3c68f0cc11994bed0ee2abaad0ea06b6
|
[
"MIT"
] | null | null | null |
gemtown/utils/abi.py
|
doramong0926/gemtown
|
2c39284e3c68f0cc11994bed0ee2abaad0ea06b6
|
[
"MIT"
] | 5
|
2020-09-04T20:13:39.000Z
|
2022-02-17T22:03:33.000Z
|
gemtown/utils/abi.py
|
doramong0926/gemtown
|
2c39284e3c68f0cc11994bed0ee2abaad0ea06b6
|
[
"MIT"
] | null | null | null |
gemtown_abi=[{"constant":True,"inputs":[{"name":"interfaceId","type":"bytes4"}],"name":"supportsInterface","outputs":[{"name":"","type":"bool"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[{"name":"tokenId","type":"uint256"}],"name":"tokenMETADATA","outputs":[{"name":"","type":"string"},{"name":"","type":"string"},{"name":"","type":"string"},{"name":"","type":"string"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"name","outputs":[{"name":"","type":"string"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[{"name":"tokenId","type":"uint256"}],"name":"getApproved","outputs":[{"name":"","type":"address"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":False,"inputs":[{"name":"to","type":"address"},{"name":"tokenId","type":"uint256"}],"name":"approve","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":True,"inputs":[],"name":"totalSupply","outputs":[{"name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[{"name":"tokenId","type":"uint256"}],"name":"tokenHASH","outputs":[{"name":"","type":"string"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":False,"inputs":[{"name":"from","type":"address"},{"name":"to","type":"address"},{"name":"tokenId","type":"uint256"}],"name":"transferFrom","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":True,"inputs":[{"name":"owner","type":"address"},{"name":"index","type":"uint256"}],"name":"tokenOfOwnerByIndex","outputs":[{"name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":False,"inputs":[{"name":"to","type":"address"},{"name":"tokenID","type":"uint256"},{"name":"tokenURIs","type":"string"},{"name":"tokenHASH","type":"string"},{"name":"tokePUBLISHER","type":"string"},{"n
ame":"tokenDATA","type":"string"}],"name":"mint_token","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[{"name":"from","type":"address"},{"name":"to","type":"address"},{"name":"tokenId","type":"uint256"}],"name":"safeTransferFrom","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":True,"inputs":[{"name":"index","type":"uint256"}],"name":"tokenByIndex","outputs":[{"name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[{"name":"tokenId","type":"uint256"}],"name":"ownerOf","outputs":[{"name":"","type":"address"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[{"name":"owner","type":"address"}],"name":"balanceOf","outputs":[{"name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":False,"inputs":[],"name":"renounceOwnership","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[{"name":"newOwner","type":"address"}],"name":"transferOwnershipOfContract","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":True,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"isOwner","outputs":[{"name":"","type":"bool"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[{"name":"tokenId","type":"uint256"}],"name":"tokenPUBLISHER","outputs":[{"name":"","type":"string"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"isOwnerOfContract","outputs":[{"name":"","type":"bool"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"symbol","outputs":[{"name":"","type":"string"}],"payable":False,"stateMutability":"view","type":"
function"},{"constant":True,"inputs":[{"name":"tokenId","type":"uint256"}],"name":"tokenDATA","outputs":[{"name":"","type":"string"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":False,"inputs":[{"name":"to","type":"address"},{"name":"approved","type":"bool"}],"name":"setApprovalForAll","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[{"name":"from","type":"address"},{"name":"to","type":"address"},{"name":"tokenId","type":"uint256"},{"name":"_data","type":"bytes"}],"name":"safeTransferFrom","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":True,"inputs":[{"name":"tokenId","type":"uint256"}],"name":"tokenURI","outputs":[{"name":"","type":"string"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[{"name":"owner","type":"address"},{"name":"operator","type":"address"}],"name":"isApprovedForAll","outputs":[{"name":"","type":"bool"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":False,"inputs":[{"name":"newOwner","type":"address"}],"name":"transferOwnership","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"inputs":[{"name":"name","type":"string"},{"name":"symbol","type":"string"}],"payable":False,"stateMutability":"nonpayable","type":"constructor"},{"anonymous":False,"inputs":[{"indexed":True,"name":"previousOwner","type":"address"},{"indexed":True,"name":"newOwner","type":"address"}],"name":"OwnershipTransferred","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"name":"from","type":"address"},{"indexed":True,"name":"to","type":"address"},{"indexed":True,"name":"tokenId","type":"uint256"}],"name":"Transfer","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"name":"owner","type":"address"},{"indexed":True,"name":"approved","type":"address"},{"indexed":True,"name":"tokenId","type":"uint256"}],"name":"Approval","type":"event"},{"anon
ymous":False,"inputs":[{"indexed":True,"name":"owner","type":"address"},{"indexed":True,"name":"operator","type":"address"},{"indexed":False,"name":"approved","type":"bool"}],"name":"ApprovalForAll","type":"event"}]
| 6,215
| 6,215
| 0.648753
| 643
| 6,215
| 6.265941
| 0.102644
| 0.069496
| 0.18764
| 0.098287
| 0.837925
| 0.785803
| 0.777116
| 0.763217
| 0.752296
| 0.718044
| 0
| 0.009815
| 0
| 6,215
| 1
| 6,215
| 6,215
| 0.638455
| 0
| 0
| 0
| 0
| 0
| 0.595077
| 0.004344
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
e22d0420d3b74170f387f6d2469278c30ddb1f1d
| 47
|
py
|
Python
|
openclassroom/__init__.py
|
openclassroomteam/openclassroom-xblock
|
f8859903ee9d9a54e4d5a387fa2885b929cc30ee
|
[
"Apache-2.0"
] | 1
|
2020-08-18T14:22:21.000Z
|
2020-08-18T14:22:21.000Z
|
openclassroom/__init__.py
|
openclassroomteam/openclassroom-xblock
|
f8859903ee9d9a54e4d5a387fa2885b929cc30ee
|
[
"Apache-2.0"
] | null | null | null |
openclassroom/__init__.py
|
openclassroomteam/openclassroom-xblock
|
f8859903ee9d9a54e4d5a387fa2885b929cc30ee
|
[
"Apache-2.0"
] | null | null | null |
from .openclassroom import OpenClassroomXBlock
| 23.5
| 46
| 0.893617
| 4
| 47
| 10.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 1
| 47
| 47
| 0.976744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e270ca79bcdefcf16b18a692c9807e26c87314ee
| 38
|
py
|
Python
|
hawc_hal/maptree/__init__.py
|
lundeenj/hawc_hal
|
21b7cfd28490e870d1ded39735827e63684556df
|
[
"BSD-3-Clause"
] | null | null | null |
hawc_hal/maptree/__init__.py
|
lundeenj/hawc_hal
|
21b7cfd28490e870d1ded39735827e63684556df
|
[
"BSD-3-Clause"
] | null | null | null |
hawc_hal/maptree/__init__.py
|
lundeenj/hawc_hal
|
21b7cfd28490e870d1ded39735827e63684556df
|
[
"BSD-3-Clause"
] | null | null | null |
from map_tree import map_tree_factory
| 19
| 37
| 0.894737
| 7
| 38
| 4.428571
| 0.714286
| 0.451613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e2963caf81b0c0a921af8396954ee43e0ce76e6e
| 2,606
|
py
|
Python
|
python/tests/test_u16.py
|
jameswilddev/Fau
|
8d42541740bd838a499f4ca3625701665b1bbcd0
|
[
"MIT"
] | null | null | null |
python/tests/test_u16.py
|
jameswilddev/Fau
|
8d42541740bd838a499f4ca3625701665b1bbcd0
|
[
"MIT"
] | 1
|
2020-06-08T23:03:28.000Z
|
2021-01-21T13:16:53.000Z
|
python/tests/test_u16.py
|
jameswilddev/Fau
|
8d42541740bd838a499f4ca3625701665b1bbcd0
|
[
"MIT"
] | null | null | null |
import unittest
from fau import U16
class TestU16(unittest.TestCase):
def test_length_negative_throws_error(self):
with self.assertRaises(OverflowError):
U16(-1)
def test_length_zero_returns_correct_length(self):
u16 = U16(0)
self.assertEqual(0, len(u16))
def test_length_correct(self):
u16 = U16(7)
self.assertEqual(7, len(u16))
def test_initialized_as_zeroes(self):
u16 = U16(7)
self.assertEqual(0, u16[0])
self.assertEqual(0, u16[1])
self.assertEqual(0, u16[2])
self.assertEqual(0, u16[3])
self.assertEqual(0, u16[4])
self.assertEqual(0, u16[5])
self.assertEqual(0, u16[6])
def test_get_negative_index_throws_error(self):
u16 = U16(7)
with self.assertRaises(IndexError):
u16[-1]
def test_get_index_out_of_range_throws_error(self):
u16 = U16(7)
with self.assertRaises(IndexError):
u16[7]
def test_set_negative_index_throws_error(self):
u16 = U16(7)
with self.assertRaises(IndexError):
u16[-1] = 42343
def test_set_index_out_of_range_throws_error(self):
u16 = U16(7)
with self.assertRaises(IndexError):
u16[7] = 42343
def test_set_negative_throws_error(self):
u16 = U16(7)
with self.assertRaises(OverflowError):
u16[3] = -1
def test_set_out_of_range_throws_error(self):
u16 = U16(7)
with self.assertRaises(OverflowError):
u16[3] = 65536
def test_set_zero(self):
u16 = U16(7)
u16[3] = 42343
u16[3] = 0
self.assertEqual(0, u16[0])
self.assertEqual(0, u16[1])
self.assertEqual(0, u16[2])
self.assertEqual(0, u16[3])
self.assertEqual(0, u16[4])
self.assertEqual(0, u16[5])
self.assertEqual(0, u16[6])
def test_set_positive(self):
u16 = U16(7)
u16[3] = 42343
self.assertEqual(0, u16[0])
self.assertEqual(0, u16[1])
self.assertEqual(0, u16[2])
self.assertEqual(42343, u16[3])
self.assertEqual(0, u16[4])
self.assertEqual(0, u16[5])
self.assertEqual(0, u16[6])
def test_set_maximum_value(self):
u16 = U16(7)
u16[3] = 65535
self.assertEqual(0, u16[0])
self.assertEqual(0, u16[1])
self.assertEqual(0, u16[2])
self.assertEqual(65535, u16[3])
self.assertEqual(0, u16[4])
self.assertEqual(0, u16[5])
self.assertEqual(0, u16[6])
| 24.584906
| 55
| 0.585188
| 348
| 2,606
| 4.224138
| 0.135057
| 0.306122
| 0.293878
| 0.336054
| 0.776871
| 0.738095
| 0.702721
| 0.67551
| 0.67551
| 0.67551
| 0
| 0.138137
| 0.291635
| 2,606
| 105
| 56
| 24.819048
| 0.65818
| 0
| 0
| 0.605263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.486842
| 1
| 0.171053
| false
| 0
| 0.026316
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2c401bacd2c9b03291f060c0852cb509f20d6aa8
| 20,102
|
py
|
Python
|
swagger_client/api/transaction_api.py
|
chbndrhnns/ahoi-client
|
8bd25f541c05af17c82904fa250272514b7971f2
|
[
"MIT"
] | null | null | null |
swagger_client/api/transaction_api.py
|
chbndrhnns/ahoi-client
|
8bd25f541c05af17c82904fa250272514b7971f2
|
[
"MIT"
] | null | null | null |
swagger_client/api/transaction_api.py
|
chbndrhnns/ahoi-client
|
8bd25f541c05af17c82904fa250272514b7971f2
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
[AHOI cookbook](/ahoi/docs/cookbook/index.html) [Data Privacy](/sandboxmanager/#/privacy) [Terms of Service](/sandboxmanager/#/terms) [Imprint](https://sparkassen-hub.com/impressum/) © 2016‐2017 Starfinanz - Ein Unternehmen der Finanz Informatik # noqa: E501
OpenAPI spec version: 2.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class TransactionApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_transaction(self, access_id, account_id, transaction_id, **kwargs): # noqa: E501
"""Get transaction # noqa: E501
Returns the transaction identified by **transactionId** in relationship with **accountId**. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_transaction(access_id, account_id, transaction_id, async=True)
>>> result = thread.get()
:param async bool
:param int access_id: The **accessId** for the transaction to retrieve (required)
:param int account_id: The **accountId** for the transaction to retrieve (required)
:param int transaction_id: The **transactionId** for the pattern to retrieve (required)
:return: Transaction
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_transaction_with_http_info(access_id, account_id, transaction_id, **kwargs) # noqa: E501
else:
(data) = self.get_transaction_with_http_info(access_id, account_id, transaction_id, **kwargs) # noqa: E501
return data
def get_transaction_with_http_info(self, access_id, account_id, transaction_id, **kwargs): # noqa: E501
"""Get transaction # noqa: E501
Returns the transaction identified by **transactionId** in relationship with **accountId**. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_transaction_with_http_info(access_id, account_id, transaction_id, async=True)
>>> result = thread.get()
:param async bool
:param int access_id: The **accessId** for the transaction to retrieve (required)
:param int account_id: The **accountId** for the transaction to retrieve (required)
:param int transaction_id: The **transactionId** for the pattern to retrieve (required)
:return: Transaction
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['access_id', 'account_id', 'transaction_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_transaction" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'access_id' is set
if ('access_id' not in params or
params['access_id'] is None):
raise ValueError("Missing the required parameter `access_id` when calling `get_transaction`") # noqa: E501
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_transaction`") # noqa: E501
# verify the required parameter 'transaction_id' is set
if ('transaction_id' not in params or
params['transaction_id'] is None):
raise ValueError("Missing the required parameter `transaction_id` when calling `get_transaction`") # noqa: E501
collection_formats = {}
path_params = {}
if 'access_id' in params:
path_params['accessId'] = params['access_id'] # noqa: E501
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
if 'transaction_id' in params:
path_params['transactionId'] = params['transaction_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/accesses/{accessId}/accounts/{accountId}/transactions/{transactionId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Transaction', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_transactions(self, access_id, account_id, **kwargs): # noqa: E501
"""List transactions for account # noqa: E501
Retrieve all transactions for **accountId**. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_transactions(access_id, account_id, async=True)
>>> result = thread.get()
:param async bool
:param int access_id: The **accessId** for which to retrieve transactions. (required)
:param int account_id: The **accountId** for which to retrieve transactions. (required)
:param int max_age: Optional — indicates the maximum acceptable timeframe (in seconds) since the last refresh of the given account.
:param int limit: Optional — limit the number of returned transactions
:param int offset: Optional — skip the first **offset** transactions in result
:param str _from: Optional — only return transactions with booking date later than **from**; an ISO8601 Month(2014-11), Date (2014-11-17) or DateTime (2014-11-17T12:00:00Z)
:param str to: Optional — only return transactions with booking date prior or equal to **to**; an ISO8601 Date, Month or DateTime
:return: list[Transaction]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.list_transactions_with_http_info(access_id, account_id, **kwargs) # noqa: E501
else:
(data) = self.list_transactions_with_http_info(access_id, account_id, **kwargs) # noqa: E501
return data
def list_transactions_with_http_info(self, access_id, account_id, **kwargs): # noqa: E501
"""List transactions for account # noqa: E501
Retrieve all transactions for **accountId**. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_transactions_with_http_info(access_id, account_id, async=True)
>>> result = thread.get()
:param async bool
:param int access_id: The **accessId** for which to retrieve transactions. (required)
:param int account_id: The **accountId** for which to retrieve transactions. (required)
:param int max_age: Optional — indicates the maximum acceptable timeframe (in seconds) since the last refresh of the given account.
:param int limit: Optional — limit the number of returned transactions
:param int offset: Optional — skip the first **offset** transactions in result
:param str _from: Optional — only return transactions with booking date later than **from**; an ISO8601 Month(2014-11), Date (2014-11-17) or DateTime (2014-11-17T12:00:00Z)
:param str to: Optional — only return transactions with booking date prior or equal to **to**; an ISO8601 Date, Month or DateTime
:return: list[Transaction]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['access_id', 'account_id', 'max_age', 'limit', 'offset', '_from', 'to'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_transactions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'access_id' is set
if ('access_id' not in params or
params['access_id'] is None):
raise ValueError("Missing the required parameter `access_id` when calling `list_transactions`") # noqa: E501
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `list_transactions`") # noqa: E501
collection_formats = {}
path_params = {}
if 'access_id' in params:
path_params['accessId'] = params['access_id'] # noqa: E501
if 'account_id' in params:
path_params['accountId'] = params['account_id'] # noqa: E501
query_params = []
if 'max_age' in params:
query_params.append(('max-age', params['max_age'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if '_from' in params:
query_params.append(('from', params['_from'])) # noqa: E501
if 'to' in params:
query_params.append(('to', params['to'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/accesses/{accessId}/accounts/{accountId}/transactions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Transaction]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_transactions_for_pattern(self, access_id, account_id, pattern_id, **kwargs): # noqa: E501
"""List transactions for pattern # noqa: E501
Retrieve all transactions for **patternId**. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_transactions_for_pattern(access_id, account_id, pattern_id, async=True)
>>> result = thread.get()
:param async bool
:param int access_id: The **accessId** for which to retrieve transactions (required)
:param int account_id: The **accountId** for which to retrieve transactions (required)
:param int pattern_id: The **patternId** for which to retrieve transactions (required)
:param int max_age: Optional — indicates the maximum acceptable timeframe (in seconds) since the last refresh of the given account
:param int limit: Optional — limit the number of returned transactions
:param int offset: Optional — skip the first **offset** transactions in result
:param str _from: Optional — only return transactions with a booking date later than **from**; an ISO8601 Month(2014-11), Date (2014-11-17) or DateTime (2014-11-17T12:00:00Z)
:param str to: Optional — only return transactions with a booking date prior or equal to **to**; an ISO8601 Date, Month or DateTime
:return: list[Transaction]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.list_transactions_for_pattern_with_http_info(access_id, account_id, pattern_id, **kwargs) # noqa: E501
else:
(data) = self.list_transactions_for_pattern_with_http_info(access_id, account_id, pattern_id, **kwargs) # noqa: E501
return data
def list_transactions_for_pattern_with_http_info(self, access_id, account_id, pattern_id, **kwargs):  # noqa: E501
    """List transactions for pattern  # noqa: E501

    Retrieve all transactions for **patternId**.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.list_transactions_for_pattern_with_http_info(access_id, account_id, pattern_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param int access_id: The **accessId** for which to retrieve transactions (required)
    :param int account_id: The **accountId** for which to retrieve transactions (required)
    :param int pattern_id: The **patternId** for which to retrieve transactions (required)
    :param int max_age: Optional — indicates the maximum acceptable timeframe (in seconds) since the last refresh of the given account
    :param int limit: Optional — limit the number of returned transactions
    :param int offset: Optional — skip the first **offset** transactions in result
    :param str _from: Optional — only return transactions with a booking date later than **from**; an ISO8601 Month(2014-11), Date (2014-11-17) or DateTime (2014-11-17T12:00:00Z)
    :param str to: Optional — only return transactions with a booking date prior or equal to **to**; an ISO8601 Date, Month or DateTime
    :return: list[Transaction]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Whitelist of accepted keyword arguments; anything else is rejected
    # below with a TypeError so typos surface immediately.
    all_params = ['access_id', 'account_id', 'pattern_id', 'max_age', 'limit', 'offset', '_from', 'to']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() snapshots the named args; kwargs entries are folded in and the
    # raw 'kwargs' key is dropped so `params` holds one flat mapping.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_transactions_for_pattern" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'access_id' is set
    if ('access_id' not in params or
            params['access_id'] is None):
        raise ValueError("Missing the required parameter `access_id` when calling `list_transactions_for_pattern`")  # noqa: E501
    # verify the required parameter 'account_id' is set
    if ('account_id' not in params or
            params['account_id'] is None):
        raise ValueError("Missing the required parameter `account_id` when calling `list_transactions_for_pattern`")  # noqa: E501
    # verify the required parameter 'pattern_id' is set
    if ('pattern_id' not in params or
            params['pattern_id'] is None):
        raise ValueError("Missing the required parameter `pattern_id` when calling `list_transactions_for_pattern`")  # noqa: E501

    collection_formats = {}

    # Path placeholders use the API's camelCase names; the Python signature
    # uses snake_case, so map them here.
    path_params = {}
    if 'access_id' in params:
        path_params['accessId'] = params['access_id']  # noqa: E501
    if 'account_id' in params:
        path_params['accountId'] = params['account_id']  # noqa: E501
    if 'pattern_id' in params:
        path_params['patternId'] = params['pattern_id']  # noqa: E501

    query_params = []
    if 'max_age' in params:
        query_params.append(('max-age', params['max_age']))  # noqa: E501
    if 'limit' in params:
        query_params.append(('limit', params['limit']))  # noqa: E501
    if 'offset' in params:
        query_params.append(('offset', params['offset']))  # noqa: E501
    # '_from' is prefixed because 'from' is a Python keyword; the wire
    # parameter is still 'from'.
    if '_from' in params:
        query_params.append(('from', params['_from']))  # noqa: E501
    if 'to' in params:
        query_params.append(('to', params['to']))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    return self.api_client.call_api(
        '/accesses/{accessId}/accounts/{accountId}/transactionpatterns/{patternId}/transactions', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[Transaction]',  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| 48.791262
| 277
| 0.6373
| 2,429
| 20,102
| 5.093042
| 0.088514
| 0.04462
| 0.025463
| 0.028858
| 0.936949
| 0.926845
| 0.921833
| 0.911002
| 0.908092
| 0.889904
| 0
| 0.025154
| 0.266292
| 20,102
| 411
| 278
| 48.909976
| 0.812258
| 0.064571
| 0
| 0.768519
| 0
| 0
| 0.226565
| 0.049436
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.018519
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2ca67f1bc3f698c3c18a4d055a63b7f18ee08e4e
| 8,640
|
py
|
Python
|
cauldron/test/ui/test_ui_start.py
|
JohnnyPeng18/cauldron
|
09120c2a4cef65df46f8c0c94f5d79395b3298cd
|
[
"MIT"
] | 90
|
2016-09-02T15:11:10.000Z
|
2022-01-02T11:37:57.000Z
|
cauldron/test/ui/test_ui_start.py
|
JohnnyPeng18/cauldron
|
09120c2a4cef65df46f8c0c94f5d79395b3298cd
|
[
"MIT"
] | 86
|
2016-09-23T16:52:22.000Z
|
2022-03-31T21:39:56.000Z
|
cauldron/test/ui/test_ui_start.py
|
JohnnyPeng18/cauldron
|
09120c2a4cef65df46f8c0c94f5d79395b3298cd
|
[
"MIT"
] | 261
|
2016-12-22T05:36:48.000Z
|
2021-11-26T12:40:42.000Z
|
from unittest.mock import MagicMock
from unittest.mock import patch
from cauldron import environ
from cauldron import ui
@patch('cauldron.ui.launcher')
@patch('cauldron.ui.connect')
@patch('cauldron.ui.environ.remote_connection')
@patch('cauldron.ui.environ.systems')
@patch('cauldron.ui.configs')
@patch('flask.Flask')
@patch('waitress.serve')
def test_start_defaults(
        waitress_serve: MagicMock,
        flask_constructor: MagicMock,
        ui_configs: MagicMock,
        environ_systems: MagicMock,
        remote_connection: MagicMock,
        connect: MagicMock,
        launcher: MagicMock,
):
    """Should start the ui with default configuration."""
    # Arrange: fresh UI state, an unreachable remote, and open port 1234.
    ui_configs.UI_APP_DATA = {}
    ui_configs.LAUNCH_THREAD = None
    launcher.find_open_port.return_value = 1234
    connect._clean_url.return_value = 'foo'
    connect.check_connection.return_value = environ.Response().fail().response
    flask_app = MagicMock()
    flask_constructor.return_value = flask_app

    ui.start()

    expected = {'port': 1234, 'host': 'localhost'}
    assert waitress_serve.call_args[1] == expected, """
        Expect app run configuration to be {}
        """.format(expected)

    expected = {'port': 1234, 'host': None}
    recorded = ui_configs.UI_APP_DATA.items()
    assert all(pair in recorded for pair in expected.items()), """
        Expect configs.UI_APP_DATA to have {}
        """.format(expected)

    assert environ_systems.end.call_count == 0, """
        Expected no call to end the application execution process.
        """
    assert ui_configs.LAUNCH_THREAD is not None, 'Expect a launch thread.'
@patch('cauldron.ui.launcher')
@patch('cauldron.ui.connect')
@patch('cauldron.ui.environ.remote_connection')
@patch('cauldron.ui.environ.systems')
@patch('cauldron.ui.configs')
@patch('flask.Flask')
def test_start_customized_basic(
        flask_constructor: MagicMock,
        ui_configs: MagicMock,
        environ_systems: MagicMock,
        remote_connection: MagicMock,
        connect: MagicMock,
        launcher: MagicMock,
):
    """Should start the ui with customized configuration."""
    # Parameters arrive in reverse decorator order (patch applies bottom-up).
    # No waitress patch here: basic=True serves via Flask's own app.run.
    ui_configs.UI_APP_DATA = {}
    ui_configs.LAUNCH_THREAD = None
    connect._clean_url.return_value = 'foo'
    # A failed connection check means no remote connection is configured.
    connect.check_connection.return_value = environ.Response().fail().response
    launcher.find_open_port.return_value = 1234
    app = MagicMock()
    flask_constructor.return_value = app
    ui.start(port=4321, debug=True, public=True, basic=True)
    # public=True binds all interfaces; the explicit port wins over 1234.
    expected = {'port': 4321, 'debug': True, 'host': '0.0.0.0'}
    assert {'threaded': True, **expected} == app.run.call_args[1], """
        Expect app run configuration to be {}
        """.format(expected)
    assert all(
        item in ui_configs.UI_APP_DATA.items()
        for item in expected.items()
    ), """
        Expect configs.UI_APP_DATA to have {}
        """.format(expected)
    assert 0 == environ_systems.end.call_count, """
        Expected no call to end the application execution process.
        """
    assert ui_configs.LAUNCH_THREAD is None, """
        Expect no launch thread when run in debug mode because
        auto-reloading causes problems.
        """
@patch('cauldron.ui.launcher')
@patch('cauldron.ui.connect')
@patch('cauldron.ui.environ.remote_connection')
@patch('cauldron.ui.environ.systems')
@patch('cauldron.ui.configs')
@patch('flask.Flask')
@patch('waitress.serve')
def test_start_customized(
        waitress_serve: MagicMock,
        flask_constructor: MagicMock,
        ui_configs: MagicMock,
        environ_systems: MagicMock,
        remote_connection: MagicMock,
        connect: MagicMock,
        launcher: MagicMock,
):
    """Should start the ui with customized configuration."""
    # Parameters arrive in reverse decorator order (patch applies bottom-up).
    # Without basic=True the app is served through waitress, not app.run.
    ui_configs.UI_APP_DATA = {}
    ui_configs.LAUNCH_THREAD = None
    connect._clean_url.return_value = 'foo'
    # A failed connection check means no remote connection is configured.
    connect.check_connection.return_value = environ.Response().fail().response
    launcher.find_open_port.return_value = 1234
    app = MagicMock()
    flask_constructor.return_value = app
    ui.start(port=4321, public=True, debug=True)
    # public=True binds all interfaces; the explicit port wins over 1234.
    expected = {'port': 4321, 'host': '0.0.0.0'}
    assert expected == waitress_serve.call_args[1], """
        Expect app run configuration to be {}
        """.format(expected)
    assert all(
        item in ui_configs.UI_APP_DATA.items()
        for item in expected.items()
    ), """
        Expect configs.UI_APP_DATA to have {}
        """.format(expected)
    assert 0 == environ_systems.end.call_count, """
        Expected no call to end the application execution process.
        """
    assert ui_configs.LAUNCH_THREAD is None, """
        Expect no launch thread when run in debug mode because
        auto-reloading causes problems.
        """
@patch('cauldron.ui.launcher')
@patch('cauldron.ui.connect')
@patch('cauldron.ui.environ.remote_connection')
@patch('cauldron.ui.environ.systems')
@patch('cauldron.ui.configs')
@patch('flask.Flask')
def test_start_remote_connection_basic(
        flask_constructor: MagicMock,
        ui_configs: MagicMock,
        environ_systems: MagicMock,
        remote_connection: MagicMock,
        connect: MagicMock,
        launcher: MagicMock,
):
    """Should start the ui with a remote connection."""
    # Parameters arrive in reverse decorator order (patch applies bottom-up).
    ui_configs.UI_APP_DATA = {}
    ui_configs.LAUNCH_THREAD = None
    connect._clean_url.return_value = 'foo'
    # check_connection returns a non-failed Response: the remote connection
    # to connection_url is accepted.
    connect.check_connection.return_value = environ.Response()
    launcher.find_open_port.return_value = 1234
    app = MagicMock()
    flask_constructor.return_value = app
    ui.start(
        port=4321,
        debug=True,
        host='bar',
        connection_url='foo:8080',
        basic=True,
    )
    # Explicit host overrides both localhost and public binding.
    expected = {'port': 4321, 'debug': True, 'host': 'bar'}
    assert {'threaded': True, **expected} == app.run.call_args[1], """
        Expect app run configuration to be {}
        """.format(expected)
    assert all(
        item in ui_configs.UI_APP_DATA.items()
        for item in expected.items()
    ), """
        Expect configs.UI_APP_DATA to have {}
        """.format(expected)
    assert 0 == environ_systems.end.call_count, """
        Expected no call to end the application execution process.
        """
    assert ui_configs.LAUNCH_THREAD is None, """
        Expect no launch thread when run in debug mode because
        auto-reloading causes problems.
        """
    # The remote connection state should reflect the cleaned URL.
    assert remote_connection.url == 'foo'
    assert remote_connection.active
@patch('cauldron.ui.launcher')
@patch('cauldron.ui.connect')
@patch('cauldron.ui.environ.remote_connection')
@patch('cauldron.ui.environ.systems')
@patch('cauldron.ui.configs')
@patch('flask.Flask')
def test_start_remote_connection_failed_basic(
        flask_constructor: MagicMock,
        ui_configs: MagicMock,
        environ_systems: MagicMock,
        remote_connection: MagicMock,
        connect: MagicMock,
        launcher: MagicMock,
):
    """Should start the ui with a remote connection."""
    # Arrange: fresh UI state and a remote connection check that fails.
    ui_configs.UI_APP_DATA = {}
    ui_configs.LAUNCH_THREAD = None
    launcher.find_open_port.return_value = 1234
    connect._clean_url.return_value = 'foo'
    connect.check_connection.return_value = environ.Response().fail().response
    flask_app = MagicMock()
    flask_constructor.return_value = flask_app

    ui.start(
        port=4321,
        debug=True,
        host='bar',
        connection_url='foo:8080',
        basic=True,
    )

    assert flask_app.run.call_count == 0, 'Expect no application to start.'
    assert environ_systems.end.call_args[0] == (1,), """
        Expected exit to be called one with a 1 returncode due to the
        error in establishing a remote connection.
        """
@patch('cauldron.ui.launcher')
@patch('cauldron.ui.connect')
@patch('cauldron.ui.environ.remote_connection')
@patch('cauldron.ui.environ.systems')
@patch('cauldron.ui.configs')
@patch('flask.Flask')
@patch('waitress.serve')
def test_start_version(
        waitress_serve: MagicMock,
        flask_constructor: MagicMock,
        ui_configs: MagicMock,
        environ_systems: MagicMock,
        remote_connection: MagicMock,
        connect: MagicMock,
        launcher: MagicMock,
):
    """Should show version information and then exit without error."""
    # Arrange the same baseline state used by the other start tests.
    ui_configs.UI_APP_DATA = {}
    ui_configs.LAUNCH_THREAD = None
    launcher.find_open_port.return_value = 1234
    connect._clean_url.return_value = 'foo'
    connect.check_connection.return_value = environ.Response().fail().response
    flask_app = MagicMock()
    flask_constructor.return_value = flask_app

    ui.start(version=True)

    assert environ_systems.end.call_args[0] == (0,), """
        Expected exit to be called one with a zero returncode.
        """
| 32.977099
| 78
| 0.671412
| 1,047
| 8,640
| 5.357211
| 0.100287
| 0.051346
| 0.080228
| 0.039936
| 0.911749
| 0.908896
| 0.904974
| 0.904974
| 0.893029
| 0.893029
| 0
| 0.013187
| 0.210069
| 8,640
| 261
| 79
| 33.103448
| 0.808645
| 0.034954
| 0
| 0.865217
| 0
| 0
| 0.278199
| 0.046226
| 0
| 0
| 0
| 0
| 0.091304
| 1
| 0.026087
| false
| 0
| 0.017391
| 0
| 0.043478
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2cb259c738dc0c23660b946d5a1786366c511ff9
| 11,417
|
py
|
Python
|
preprocessing/processor.py
|
GPrathap/OpenBCIPython
|
0f5be167fb09d31c15885003eeafec8cdc08dbfa
|
[
"MIT"
] | 1
|
2021-11-07T12:01:08.000Z
|
2021-11-07T12:01:08.000Z
|
preprocessing/processor.py
|
GPrathap/OpenBCIPython
|
0f5be167fb09d31c15885003eeafec8cdc08dbfa
|
[
"MIT"
] | null | null | null |
preprocessing/processor.py
|
GPrathap/OpenBCIPython
|
0f5be167fb09d31c15885003eeafec8cdc08dbfa
|
[
"MIT"
] | 1
|
2020-10-15T08:35:01.000Z
|
2020-10-15T08:35:01.000Z
|
import json
import os
import seaborn as sb
from features.fft import FFT
from features.generic_type import EMG
from features.mean import Mean
from features.mfcc import MFCC
from features.zcr import ZCR
from manager import FeatureManager
import pandas as pd
from pandas import DataFrame, Series
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from utils.Audio import Audio
#
# sb.set(style="white", palette="muted")
#
# import random
# random.seed(20150420)
# from scipy.signal import butter, filtfilt
# import numpy as np
# import matplotlib.pyplot as plt
#
# import pandas as pd
# from pandas import DataFrame, Series
# from scipy.signal import butter, filtfilt
# from pandas import DataFrame, Series
# from scipy import signal
#
# class Clip:
#
# def __init__(self, config, buffer=None, filename=None, file_type=None):
# self.is_raw_data = eval(config["is_raw_data"])
# self.frame_size = int(config["window_size"])
# self.sampling_rate = int(config["sampling_rate"])
# # self.project_path = str(config["project_file_path"])
# self.project_path = "/home/runge/openbci/git/OpenBCI_Python"
# feature_config_file = self.project_path + "/features/config/feature_config.json"
# if self.is_raw_data:
# self.filename = os.path.basename(filename)
# self.path = os.path.abspath(filename)
# self.directory = os.path.dirname(self.path)
# self.category = self.directory.split('/')[-1]
# self.audio = Audio(self.path, file_type)
#
# else:
# self.audio = Audio(is_raw_data=self.is_raw_data, data=buffer)
#
# with open(feature_config_file) as feature_config:
# self.feature_config = json.load(feature_config)
# self.feature_config["sampling_rate"] = self.sampling_rate
# self.feature_config["frame_size"] = self.frame_size
# self.feature_config["is_raw_data"] = self.is_raw_data
#
#
# with self.audio as audio:
# self.featureManager = FeatureManager()
#
# self.featureManager.addRegisteredFeatures(FFT(self.audio, self.feature_config), "fft")
# self.featureManager.addRegisteredFeatures(EMG(self.audio, self.feature_config), "emg")
#
# self.featureManager.getRegisteredFeature("fft").compute_fft()
# # self.featureManager.getRegisteredFeature("emg").compute_hurst()
# self.featureManager.getRegisteredFeature("emg").compute_embed_seq()
# self.featureManager.getRegisteredFeature("emg").compute_bin_power()
# # self.featureManager.getRegisteredFeature("emg").compute_pfd()
# # self.featureManager.getRegisteredFeature("emg").compute_hfd()
# # self.featureManager.getRegisteredFeature("emg").compute_hjorth()
# # self.featureManager.getRegisteredFeature("emg").compute_spectral_entropy()
# # self.featureManager.getRegisteredFeature("emg").compute_svd_entropy()
# # self.featureManager.getRegisteredFeature("emg").compute_ap_entropy()
# # self.featureManager.getRegisteredFeature("emg").compute_samp_entropy()
#
#
# self.feature_list = self.featureManager.getRegisteredFeatures()
#
# def __repr__(self):
# return '<{0}/{1}>'.format(self.category, self.filename)
#
# def get_feature_vector(self):
# # self.featureManager.getRegisteredFeature("emg").get_hurst()
# return self.featureManager.getRegisteredFeature("fft").get_logamplitude()
#
# fsamp = 256
# tsample = 1 / fsamp
# f_low = 50
# f_high = 1
# order = 2
# channel_vector = [1,2, 3, 4, 5]
# n_ch = len(channel_vector)
# df = pd.read_csv("/home/runge/openbci/application.linux64/application.linux64/OpenBCI-RAW-right_strait_up_new.txt")
# df = df[channel_vector].dropna(axis=0)
#
# processed_signal = df.copy()
#
# b, a = butter(order, (order * f_low * 1.0) / fsamp * 1.0, btype='low')
# for i in range(0, n_ch):
# processed_signal.ix[:, i] = np.transpose(filtfilt(b, a, df.ix[:, i]))
#
# b1, a1 = butter(order, (order * f_high * 1.0) / fsamp * 1.0, btype='high')
# for i in range(0, n_ch):
# processed_signal.ix[:, i] = np.transpose(filtfilt(b1, a1, processed_signal.ix[:, i]))
#
# Wn = (np.array([58.0, 62.0]) / 500 * order).tolist()
# b3, a3 = butter(order, Wn, btype='stop')
# for i in range(0, n_ch):
# processed_signal.ix[:, i] = np.transpose(filtfilt(b3, a3, processed_signal.ix[:, i]))
#
# project_file_path = "/home/runge/openbci/git/OpenBCI_Python"
# config_file = project_file_path + "/config/config.json"
# with open(config_file) as config:
# config = json.load(config)
#
# start = 0
# end = 400
# plt.figure(figsize=(12, 8))
# for h in range(0, n_ch):
# plt.subplot(n_ch,1,h+1)
# clip = Clip(config, buffer=np.array(processed_signal.ix[:, h][start * fsamp:end * fsamp].tolist()))
# # f, Pxx_spec = signal.periodogram(processed_signal.ix[:, h][start * fsamp:end * fsamp], fsamp, 'flattop',
# # scaling='spectrum')
#
# # f, Pxx_spec = signal.welch(processed_signal.ix[:, h][start * fsamp:end * fsamp], fsamp, 'flattop', 128, scaling='spectrum')
# # wavelet = signal.ricker
# # widths = np.arange(1, 11)
# # cwtmatr = signal.cwt(processed_signal.ix[:, h][start * fsamp:end * fsamp], wavelet, widths)
# plt.plot(clip.feature_list.get("emg").get_bin_power())
# # plt.semilogy(fsamp, np.sqrt(Pxx_spec))
# # plt.ylim([1e-4, 1e1])
# plt.show()
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
# --- live module-level setup (everything above this point is commented-out
# history of an earlier version of this file) ---
# NOTE(review): these imports duplicate the ones at the top of the file;
# harmless but redundant.
import json
import os
from features.fft import FFT
from features.generic_type import EMG
from manager import FeatureManager
# Seaborn plot styling (sb is imported as seaborn at the top of the file).
sb.set(style="white", palette="muted")
import random
random.seed(20150420)  # fixed seed for reproducible runs
class Clip:
    """A windowed signal segment (file-backed or in-memory) plus its EMG features.

    On construction the segment is wrapped in an ``Audio`` object, the
    feature configuration is loaded and populated with runtime settings,
    and the EMG feature set (embed_seq, svd_entropy) is computed eagerly.
    """

    def __init__(self, config, buffer=None, filename=None, file_type=None):
        # SECURITY NOTE(review): eval() on a config value executes arbitrary
        # code if the config file is untrusted; a plain JSON boolean would be
        # safer — flagged, not changed.
        self.is_raw_data = eval(config["is_raw_data"])
        self.frame_size = int(config["window_size"])
        self.sampling_rate = int(config["sampling_rate"])
        # self.project_path = str(config["project_file_path"])
        # Hard-coded machine-specific path; the config-based line above was
        # disabled by the original author.
        self.project_path = "/home/runge/openbci/git/OpenBCI_Python"
        feature_config_file = self.project_path + "/features/config/feature_config.json"
        if self.is_raw_data:
            # File mode: derive name/category metadata from the path; the
            # parent directory name is used as the clip's category label.
            self.filename = os.path.basename(filename)
            self.path = os.path.abspath(filename)
            self.directory = os.path.dirname(self.path)
            self.category = self.directory.split('/')[-1]
            self.audio = Audio(self.path, file_type)
        else:
            # Buffer mode: wrap an in-memory sample buffer instead.
            self.audio = Audio(is_raw_data=self.is_raw_data, data=buffer)
        with open(feature_config_file) as feature_config:
            self.feature_config = json.load(feature_config)
            # Propagate runtime settings into the loaded feature config.
            self.feature_config["sampling_rate"] = self.sampling_rate
            self.feature_config["frame_size"] = self.frame_size
            self.feature_config["is_raw_data"] = self.is_raw_data
        with self.audio as audio:
            self.featureManager = FeatureManager()
            # self.featureManager.addRegisteredFeatures(FFT(self.audio, self.feature_config), "fft")
            self.featureManager.addRegisteredFeatures(EMG(self.audio, self.feature_config), "emg")
            # self.featureManager.getRegisteredFeature("fft").compute_fft()
            # self.featureManager.getRegisteredFeature("emg").compute_hurst()
            self.featureManager.getRegisteredFeature("emg").compute_embed_seq()
            # self.featureManager.getRegisteredFeature("emg").compute_bin_power()
            # self.featureManager.getRegisteredFeature("emg").compute_pfd()
            # self.featureManager.getRegisteredFeature("emg").compute_hfd()
            # self.featureManager.getRegisteredFeature("emg").compute_hjorth()
            # self.featureManager.getRegisteredFeature("emg").compute_spectral_entropy()
            # self.featureManager.getRegisteredFeature("emg").compute_svd_entropy()
            # self.featureManager.getRegisteredFeature("emg").compute_ap_entropy()
            self.featureManager.getRegisteredFeature("emg").compute_svd_entropy()
            self.feature_list = self.featureManager.getRegisteredFeatures()

    def __repr__(self):
        # '<category/filename>' — only meaningful in file mode; in buffer
        # mode these attributes are unset (presumably never repr'd there —
        # TODO confirm).
        return '<{0}/{1}>'.format(self.category, self.filename)

    def get_feature_vector(self):
        # Returns whatever the EMG feature's svd_entropy accessor produces;
        # shape/type depends on the project's EMG class (not visible here).
        return self.featureManager.getRegisteredFeature("emg").get_svd_entropy()
        # return self.featureManager.getRegisteredFeature("fft").get_logamplitude()
        # return self.featureManager.getRegisteredFeature("fft").get_fft_spectrogram()
# fsamp = 1
#
# channel_vector = [1,2, 3, 4, 5]
# n_ch = len(channel_vector)
#
# # df1 = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/2noise_signal.csv")
# # df1 = df1.dropna(axis=0)
# #
# # df2 = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/2noise_reduced_signal.csv")
# # df2 = df2.dropna(axis=0)
# #
# # df3 = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/2reconstructed_signal.csv")
# # df3 = df3.dropna(axis=0)
# #
# # df4 = pd.read_csv("/home/runge/openbci/git/OpenBCI_Python/build/dataset/2feature_vector.csv")
# # df4 = df4.dropna(axis=0)
# #
# # df = []
# # df.append(df1)
# # df.append(df2)
# # df.append(df3)
# # df.append(df4)
# #
# # processed_signal = df.copy()
#
#
#
# project_file_path = "/home/runge/openbci/git/OpenBCI_Python"
# config_file = project_file_path + "/config/config.json"
# raw_reconstructed_signals = pd.read_csv(project_file_path+"/build/dataset2017-5-5_23-55-32new_bycept.csv").ix[:, 2:7].dropna()
# with open(config_file) as config:
#
# config = json.load(config)
# start = 100
# end = 200
#
# if end == 0:
# end = raw_reconstructed_signals.shape[0]
# x = np.arange(start,end, 1)
# fig = plt.figure(figsize=(10, 15))
# fig.subplots_adjust(hspace=.5)
# index = 1
# for h in range(0, 5):
# processed_signal = []
# ax = plt.subplot(10,2,index)
# input_signal = raw_reconstructed_signals.ix[:,h][start:end]
# ax.plot(x, input_signal)
# position=0
# for i in range(0, int((end-start)-int(config['window_size'])-1)):
# clip = Clip(config, buffer=np.array(input_signal[position:position+int(config['window_size'])].tolist()))
# processed_signal.append(clip.get_feature_vector())
# position+=1
#
# index += 1
# ax = plt.subplot(10, 2, index)
# index += 1
# ax.plot(processed_signal)
#
# # f, Pxx_spec = signal.periodogram(processed_signal.ix[:, h][start * fsamp:end * fsamp], fsamp, 'flattop',
# # scaling='spectrum')
#
# # f, Pxx_spec = signal.welch(processed_signal.ix[:, h][start * fsamp:end * fsamp], fsamp, 'flattop', 128, scaling='spectrum')
# # wavelet = signal.ricker
# # widths = np.arange(1, 11)
# # cwtmatr = signal.cwt(processed_signal.ix[:, h][start * fsamp:end * fsamp], wavelet, widths)
# # plt.plot(raw_reconstructed_signals[h].ix[:,0][start * fsamp:end * fsamp])
# # plt.semilogy(fsamp, np.sqrt(Pxx_spec))
# # plt.ylim([1e-4, 1e1])
# plt.show()
| 33.778107
| 135
| 0.644828
| 1,392
| 11,417
| 5.112787
| 0.158764
| 0.08852
| 0.144162
| 0.126739
| 0.800478
| 0.782633
| 0.745539
| 0.713362
| 0.708304
| 0.694675
| 0
| 0.019077
| 0.2103
| 11,417
| 337
| 136
| 33.878338
| 0.770297
| 0.73373
| 0
| 0.188679
| 0
| 0
| 0.064103
| 0.027106
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056604
| false
| 0
| 0.396226
| 0.037736
| 0.509434
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
e2d897be6ebcc687de1f3b0f5a1da77fb87a4148
| 33,223
|
py
|
Python
|
alsmicroct/data_management.py
|
hbar/alsTomographyTools
|
ec1edd1477367a57ee94e806134aee92e57db977
|
[
"MIT"
] | null | null | null |
alsmicroct/data_management.py
|
hbar/alsTomographyTools
|
ec1edd1477367a57ee94e806134aee92e57db977
|
[
"MIT"
] | null | null | null |
alsmicroct/data_management.py
|
hbar/alsTomographyTools
|
ec1edd1477367a57ee94e806134aee92e57db977
|
[
"MIT"
] | null | null | null |
import os
import sys
import time
import logging
logging.basicConfig(level=logging.INFO, stream=sys.stdout,format='%(message)s')
import numpy as np # fundamental numeric operations package
import tomopy # tomographic reconstrcution package
import dxchange
import h5py
import glob
import getpass # allows commandline password input
import requests # tools for web requests/communication with online APIs
# Create spot session class
# This class includes all functions for authenticating and communcating with SPOT API
class SpotSession():
# =============================================================================
# Login
# tested, works properly
def __init__(self, username='default'):
    """Authenticate against the SPOT API and keep the session.

    Prompts on stdin for the SPOT login name and password, POSTs them to
    the NERSC auth endpoint, and stores the resulting cookie-bearing
    ``requests.Session`` on ``self.session``.

    :param username: explicit username used when building dataset file
        paths (see ``formatPath``); when left as ``'default'`` the login
        name entered at the prompt is used instead.
    """
    self.URL_authentication = "https://portal-auth.nersc.gov/als/auth"
    self.username = username
    # Credentials are always prompted interactively (Python 2: raw_input).
    spot_username = raw_input("username:")
    spot_password = getpass.getpass()
    if username == 'default':
        # No explicit username given: the login name doubles as the
        # username used in file paths. The original assigned the prompt to
        # self.spot_username and then read an undefined local
        # ``spot_username`` here, raising NameError on the default path.
        self.spot_username = spot_username
    else:
        # Explicit username: use it for paths (formatPath receives the
        # caller's username anyway, so this value is rarely consulted).
        self.spot_username = username
    s = requests.Session()
    s.post(self.URL_authentication,
           {"username": spot_username, "password": spot_password})
    self.session = s
    # EXAMPLE:
    # curl -k -c cookies.txt -X POST \
    #   -d "username=[your_username]&password=[your_password]" \
    #   https://portal-auth.nersc.gov/als/auth
# =============================================================================
# Check Authentication
# tested, works properly
def check_authentication(self):
    """Return the 'auth' flag reported by the SPOT auth endpoint."""
    response = self.session.get(self.URL_authentication)
    payload = response.json()
    # return payload
    return payload['auth']
def authentication(self):
r = s.get(self.URL_authentication)
if r.json()['auth'] == False:
print "Authentication required to start a new session"
spot_username = raw_input("username:")
spot_password = getpass.getpass()
s = requests.Session()
r = s.post(self.URL_authentication,{"username":spot_username,"password":spot_password})
self.session = s
r = s.get(self.URL_authentication)
return r.json()
# =============================================================================
# Close session
# *** DOES NOT WORK ***
def close(self):
    """Close the underlying HTTP session and report the resulting auth state.

    NOTE from the original author: *** DOES NOT WORK ***
    """
    self.session.close()
    response = self.session.get(self.URL_authentication)
    return response.json()
# =============================================================================
# Search Datasets
# tested, works properly, returns list of json oblejcts
def search(self,
           query,
           limitnum=10,         # number of results to show
           skipnum=0,           # number of results to skip
           sortterm="fs.stage_date",  # field to sort on (fs.stage_date or appmetadata.sdate)
           sorttype="desc"):    # sorttype: desc or asc
    """Search SPOT datasets.

    :param query: search expression, e.g. "end_station=bl832"
    :param limitnum: number of results to show
    :param skipnum: number of results to skip
    :param sortterm: database field on which to sort
    :param sorttype: "desc" or "asc"
    :return: list of JSON objects containing search results

    EXAMPLE:
    curl -k -b cookies.txt -X GET "https://portal-auth.nersc.gov/als/hdf/search?skipnum=0&limitnum=10&sortterm=fs.stage_date&sorttype=desc&search=end_station=bl832"
    """
    self.URL_search = "https://portal-auth.nersc.gov/als/hdf/search"
    # The original sent {"search": search}, referencing an unresolvable name
    # (the method itself is a class attribute, not in scope) — the API's
    # 'search' parameter must carry the caller's query string.
    self.PARAMS_search = {"limitnum": limitnum,
                          "skipnum": skipnum,
                          "sortterm": sortterm,
                          "sorttype": sorttype,
                          "search": query}
    r = self.session.get(url=self.URL_search, params=self.PARAMS_search)
    return r.json()  # returns list of JSON objects containing search results
# =============================================================================
# Find Derived Datasets (norm, sino, gridrec, imgrec) from raw dataset
# Tested, works, returns list of json objects
def derived_datasets(self, dataset):
    """Return datasets derived from *dataset* (norm, sino, gridrec, imgrec).

    :param dataset: raw dataset name
    :return: JSON list describing the derived datasets

    EXAMPLE:
    curl -k -b cookies.txt -X GET "https://portal-auth.nersc.gov/als/hdf/dataset?dataset=20130713_185717_Chilarchaea_quellon_F_9053427_IKI_"
    """
    self.URL_DerivedDatasets = "https://portal-auth.nersc.gov/als/hdf/dataset"
    self.PARAMS_DerivedDatasets = {"dataset": dataset}
    response = self.session.get(self.URL_DerivedDatasets,
                                params=self.PARAMS_DerivedDatasets)
    return response.json()
# =============================================================================
# Takes dataset name and formats for use in API calls
# tested, used in class methods that require [dataset] entry in URL
def formatPath(self, dataset, username='default'):
    """Split a dataset identifier into (filename, username) for URL building.

    Accepts "filename" or "username/filename", with or without a trailing
    ".h5" extension.

    :param dataset: dataset identifier, optionally prefixed with a username
    :param username: explicit username; 'default'/None falls back to the
        session's spot username (or the one embedded in *dataset*)
    :return: (filename, username) tuple
    """
    # Remove a trailing ".h5" extension. The original used
    # dataset.strip(".h5"), which strips any leading/trailing '.', 'h' or
    # '5' characters (e.g. "h5_scan.h5" -> "_scan"), not just the suffix.
    if dataset.endswith(".h5"):
        dataset = dataset[:-len(".h5")]
    if "/" in dataset:
        # 'username/filename' becomes two separate strings. Checked first so
        # an embedded username wins; the original only reached this branch
        # when an explicit username was passed, leaving a stray "/" in the
        # filename on the default path.
        datasplit = dataset.split("/")
        username = datasplit[-2]
        filename = datasplit[-1]
    elif username == 'default' or username is None:
        username = self.spot_username
        filename = dataset
    else:
        filename = dataset
    return filename, username
# =============================================================================
# View Attributes for Single Dataset and Image Within Dataset
def attributes(self, dataset, username=None):
    """Return the HDF5 attributes of a dataset's raw .h5 file.

    :param dataset: dataset name, optionally "username/dataset"
    :param username: explicit username for the file path; None falls back
        to the session user (resolved by formatPath)
    :return: JSON array of attributes for group "/" (overall attributes)
    """
    self.URL_attributes = "https://portal-auth.nersc.gov/als/hdf/attributes/als/bl832/"
    dataset, username = self.formatPath(dataset, username=username)
    # Path layout: .../als/bl832/<user>/<dataset>/raw/<dataset>.h5
    URLstring = self.URL_attributes + username + "/" + dataset + "/raw/" + dataset + ".h5"
    r = self.session.get(url=URLstring, params={"group": "/"})
    return r.json()
    """
    GET
    URL:
    https://portal-auth.nersc.gov/als/hdf/attributes/[dataset]
    Arguments:
    group: path in hdf5 file. Set to "/" for overall attributes
    Result:
    JSON Array
    EXAMPLE:
    % curl -k -b cookies.txt -X GET "https://portal-auth.nersc.gov/als/hdf/attributes/als/bl832/hmwood/20130713_185717_Chilarchaea_quellon_F_9053427_IKI_/raw/20130713_185717_Chilarchaea_quellon_F_9053427_IKI_.h5?group=/"
    """
# =============================================================================
# List Images Within Dataset
# *** NOT TESTED ***
def list_images(self, dataset, username='default'):
    """List the image paths inside a dataset's HDF5 file.

    :param dataset: dataset name, optionally "username/dataset"
    :param username: explicit username for the file path (new optional,
        backward-compatible parameter; defaults to the session user)
    :return: JSON list of paths to images within the HDF5 file

    EXAMPLE:
    curl -k -b cookies.txt -X GET "https://portal-auth.nersc.gov/als/hdf/listimages/als/bl832/hmwood/20130713_185717_Chilarchaea_quellon_F_9053427_IKI_/raw/20130713_185717_Chilarchaea_quellon_F_9053427_IKI_.h5"
    """
    # Build the same .../als/bl832/<user>/<dataset>/raw/<dataset>.h5 path as
    # attributes()/stage(). The original body contained a stray bare ``URL``
    # statement (NameError) and requested an incomplete URL without the
    # user/raw components shown in the curl example.
    filename, username = self.formatPath(dataset, username=username)
    self.URL_listImages = "https://portal-auth.nersc.gov/als/hdf/listimages/als/bl832/"
    URL_string = self.URL_listImages + username + "/" + filename + "/raw/" + filename + ".h5"
    r = self.session.get(URL_string)
    return r.json()
# =============================================================================
# Stage Dataset From Tape to Disk if Required
# Tested, works properly
# ... Not sure how to figure figure out when staging is complete.
def stage(self, dataset, username='default'):
    """Ask SPOT to stage a dataset from tape to disk if required.

    GET https://portal-auth.nersc.gov/als/hdf/stageifneeded/[dataset]
    Returns the raw requests response object (there is no obvious way
    to poll for staging completion).
    """
    fileName, username = self.formatPath(dataset, username)
    self.URL_stage = "https://portal-auth.nersc.gov/als/hdf/stageifneeded/als/bl832/"
    target = "%s%s/%s/raw/%s.h5" % (self.URL_stage, username, fileName, fileName)
    return self.session.get(url=target)
"""
GET
URL:
https://portal-auth.nersc.gov/als/hdf/stageifneeded/[dataset]
Arguments:
None
Result:
JSON Array
EXAMPLE:
% curl -k -b cookies.txt -X GET "https://portal-auth.nersc.gov/als/hdf/stageifneeded/als/bl832/hmwood/20130713_185717_Chilarchaea_quellon_F_9053427_IKI_/raw/20130713_185717_Chilarchaea_quellon_F_9053427_IKI_.h5"
"""
# =============================================================================
# Download Dataset
# tested, downloads large file, haven't opened file to see if it worked
def download(self, dataset, username='default', downloadPath='default', downloadName='default'):
    """Download a dataset's raw .h5 file and return the local file path.

    GET https://portal-auth.nersc.gov/als/hdf/download/[dataset]
    downloadPath defaults to the current directory; downloadName
    defaults to the dataset name.
    """
    filename, username = self.formatPath(dataset, username=username)  # process input path
    # BUG FIX: str.strip('.h5') removes any of the characters '.', 'h', '5'
    # from both ends of the name; only drop a genuine '.h5' extension.
    if downloadName.endswith('.h5'):
        downloadName = downloadName[:-3]
    self.URL_download = "https://portal-auth.nersc.gov/als/hdf/download/als/bl832/"
    if downloadPath == 'default':
        downloadPath = "./"
    if downloadName == 'default':
        downloadName = filename
    if downloadPath[-1] != "/":  # add "/" to output path if it is not included
        downloadPath = downloadPath + "/"
    URL_string = self.URL_download + username + "/" + filename + "/raw/" + filename + ".h5"
    r = self.session.get(URL_string)
    fileLocation = downloadPath + downloadName + ".h5"
    with open(fileLocation, "wb") as location:
        location.write(r.content)
    # BUG FIX: Python 2 print statement -> print() function (matches the
    # second copy of this module and is valid in both Python 2 and 3).
    print('download complete: ' + fileLocation)
    return fileLocation
"""
GET
URL:
https://portal-auth.nersc.gov/als/hdf/download/[dataset]
Arguments:
None
Result:
H5 File
EXAMPLE:
% curl -k -b cookies.txt -X GET "https://portal-auth.nersc.gov/als/hdf/download/als/bl832/hmwood/20130713_185717_Chilarchaea_quellon_F_9053427_IKI_/raw/20130713_185717_Chilarchaea_quellon_F_9053427_IKI_.h" > file.h5
"""
# =============================================================================
# Download Rawdata For Individual Image
# *** NOT TESTED ***
def download_image(self, dataset, username=None, number=0):
    """Download raw data for an individual image of a dataset (untested).

    GET https://portal-auth.nersc.gov/als/hdf/rawdata/[dataset]
    number: image index, zero-padded to 4 digits in the path fragment.
    """
    if (type(number) == int or type(number) == float):
        numstring = "_" + str(int(number)).zfill(4)
    # BUG FIX: original called a nonexistent method
    # 'deconstruct_DatasetPath'; formatPath is the helper that exists.
    filename, username = self.formatPath(dataset, username=username)
    self.URL_download = "https://portal-auth.nersc.gov/als/hdf/download/als/bl832/"
    # BUG FIX: 'URL_download' was read unqualified (NameError); it is an
    # instance attribute.
    URL_string = self.URL_download + username + "/" + filename + "-" + filename + ".h5"  # NOT SURE WHAT TO DO HERE
    r = self.session.get(URL_string)
    return r.json()
"""
GET
URL:
https://portal-auth.nersc.gov/als/hdf/rawdata/[dataset]
Arguments:
group: path in HDF5 file to image
Result:
JSON Array
EXAMPLE:
% curl -k -b cookies.txt -X GET "https://portal-auth.nersc.gov/als/hdf/rawdata/als/bl832/hmwood/20130713_185717_Chilarchaea_quellon_F_9053427_IKI_/norm/20130713_185717_Chilarchaea_quellon_F_9053427_IKI_-norm-20130714_192637.h5?group=/20130713_185717_Chilarchaea_quellon_F_9053427_IKI_/20130713_185717_Chilarchaea_quellon_F_9053427_IKI__0000_0640.tif"
"""
# =============================================================================
# Get Download URLs for .tif and .png files for individual image
# *** Not written ***
# *** NOT TESTED ***
"""
GET
URL:
https://portal-auth.nersc.gov/als/hdf/image/[dataset]
Arguments:
group: path in HDF5 file to image
Result:
JSON Array
EXAMPLE:
% curl -k -b cookies.txt -X GET "https://portal-auth.nersc.gov/als/hdf/image/als/bl832/hmwood/20130713_185717_Chilarchaea_quellon_F_9053427_IKI_/norm/20130713_185717_Chilarchaea_quellon_F_9053427_IKI_-norm-20130714_192637.h5?group=/20130713_185717_Chilarchaea_quellon_F_9053427_IKI_/20130713_185717_Chilarchaea_quellon_F_9053427_IKI__0000_0640.tif"
"""
# =============================================================================
# Run TomoPy on an existing dataset
# *** Not written ***
# *** NOT TESTED ***
"""
GET
URL:
https://portal-auth.nersc.gov/als/hdf/tomopyjob
Arguments:
dataset: raw dataset
Result:
JSON List
EXAMPLE:
% curl -k -b cookies.txt -X GET "https://portal-auth.nersc.gov/als/hdf/tomopyjob?dataset=20130713_185717_Chilarchaea_quellon_F_9053427_IKI_"
"""
# =============================================================================
#
"""
Functions for NERSC
"""
# =============================================================================
def list_from_txt(TextFilePath='../filepath/UnformatedFileList_Test.txt', comment='#'):
    """
    Convert an unformatted .txt file of newline-separated file names
    into a python list, skipping blank lines and lines that start with
    the comment character.
    """
    if '.py' in TextFilePath:  # if file path is actually a python script, run script instead
        execfile(TextFilePath)
    else:
        fileList = []
        # BUG FIX: the file handle was never closed, and blank lines
        # raised IndexError on data[i][0]; use a with-block and skip
        # empty lines after stripping whitespace.
        with open(TextFilePath, 'r') as textFile:
            for line in textFile:
                line = line.strip()
                if line and line[0] != comment:
                    fileList.append(line)  # keep lines that are not commented
        return(fileList)
# =============================================================================
def list_h5_files(searchDir):
    """
    Return every .h5 file in the search directory (non-recursive).
    """
    # Normalise the directory so the glob pattern always has a separator.
    if not searchDir.endswith('/'):
        searchDir += "/"
    return(glob.glob(searchDir + "*.h5"))
# =============================================================================
# Default warehouse location and account for beamline 8.3.2 data at NERSC.
NERSC_DefaultPath = "/global/project/projectdirs/als/spade/warehouse/als/bl832/"
userDefault = "hsbarnard"

def NERSC_ArchivePath(filename, useraccount=userDefault, archivepath=NERSC_DefaultPath):
    '''
    Generate path(s) to raw tomography projection data in the NERSC
    archives.  Accepts a single file name or a list of names and
    returns a list of archive directory paths.
    '''
    # Convert filename to list type if only one file name is given
    if type(filename) != list:
        filename = [filename]
    pathOut = [archivepath + useraccount + "/" + name + "/raw/" for name in filename]
    # BUG FIX: Python 2 'print pathOut' statement -> print() function.
    print(pathOut)
    return pathOut
# =============================================================================
def NERSC_StageData(filename, username='default'):
    '''
    Authenticate against spot.nersc and request staging (tape -> disk)
    for each dataset in *filename*.  Equivalent of:
    > curl -k -c cookies.txt -X POST -d "username=[u]&password=[p]" https://portal-auth.nersc.gov/als/auth
    for more info: http://spot.nersc.gov/api.php
    '''
    # Prompt the user for SPOT credentials.
    URL_authentication = "https://portal-auth.nersc.gov/als/auth"
    spot_username = raw_input("username:")
    spot_password = getpass.getpass()
    if username == 'default':  # if no additional username is given, the spot login is used in file paths
        username = spot_username
    s = requests.Session()
    s.post(URL_authentication, {"username": spot_username, "password": spot_password})
    # Convert filename to list type if only one file name is given
    if type(filename) != list:
        filename = [filename]
    baseURL = "https://portal-auth.nersc.gov/als/hdf/stageifneeded/als/bl832/"
    r_list = []
    for name in filename:
        # BUG FIX: 'useraccount' and 'command_string' were undefined
        # names; build and report the actual staging URL instead.
        stage_url = baseURL + username + "/" + name + "/raw/" + name + ".h5"
        r = s.get(stage_url)
        print(stage_url)
        r_list.append(r.json())
    return r_list  # list of JSON responses, one per dataset
# =============================================================================
def NERSC_RetreiveData(filename,
                       username,
                       destinationpath,
                       archivepath=NERSC_DefaultPath):
    '''
    Copy raw tomography projection data out of the NERSC archives for a
    list of file names into *destinationpath*.
    '''
    # Convert filename to list type if only one file name is given
    if type(filename) != list:
        filename = [filename]
    # Generate source and destination paths.
    filePathIn = []
    filePathOut = []
    for name in filename:
        # BUG FIX: original referenced undefined 'useraccount'; the
        # parameter is named 'username'.
        print(archivepath, username, name)
        filePathIn.append(archivepath + username + "/" + name + "/raw/" + name + ".h5")
        filePathOut.append(destinationpath + name + ".h5")
    # BUG FIX: Python 2 print statements -> print() functions.
    logging.info("file path list complete"); print(filePathIn)
    logging.info("destination path list complete"); print(filePathOut)
    # Copy files to destination.
    for src, dst in zip(filePathIn, filePathOut):
        logging.info("begin transfer: " + src)
        # NOTE(review): shell copy; paths containing spaces would need quoting
        os.system("cp " + src + " " + dst)
    logging.info("transfer complete: ")
# ======= unresolved merge-conflict separator: a second, near-duplicate copy of this module follows (resolve before use)
import os
import sys
import time
import logging
logging.basicConfig(level=logging.INFO, stream=sys.stdout,format='%(message)s')
import numpy as np # fundamental numeric operations package
import tomopy # tomographic reconstrcution package
import dxchange
import h5py
import glob
import getpass # allows commandline password input
import requests # tools for web requests/communication with online APIs
# Create spot session class
# This class includes all functions for authenticating and communcating with SPOT API
class SpotSession():
    """Authenticated session for the ALS SPOT web API at NERSC (beamline 8.3.2).

    Wraps a requests.Session carrying the login cookie and provides
    helpers to search, inspect, stage and download tomography datasets.
    API info: http://spot.nersc.gov/api.php
    """
    # =========================================================================
    # Login
    def __init__(self, username='default'):
        """Prompt for SPOT credentials and open an authenticated session.

        POST https://portal-auth.nersc.gov/als/auth
        username: account name used in dataset paths; when left as
        'default' the SPOT login name is used.
        """
        self.URL_authentication = "https://portal-auth.nersc.gov/als/auth"
        self.username = username
        # NOTE(review): raw_input is Python 2; under Python 3 this would be input()
        self.spot_username = raw_input("username:")
        spot_password = getpass.getpass()
        if username == 'default':
            # BUG FIX: original assigned from an undefined local
            # 'spot_username'; reuse the SPOT login name for file paths.
            self.username = self.spot_username
        s = requests.Session()
        s.post(self.URL_authentication,
               {"username": self.spot_username, "password": spot_password})
        self.session = s
    # Equivalent of:
    # curl -k -c cookies.txt -X POST -d "username=[u]&password=[p]" https://portal-auth.nersc.gov/als/auth

    # =========================================================================
    # Check Authentication
    def check_authentication(self):
        """Return True if the stored session cookie is still valid."""
        r = self.session.get(self.URL_authentication)
        return r.json()['auth']

    def authentication(self):
        """Re-authenticate interactively if the session has expired."""
        # BUG FIX: original queried an undefined local 's' before it was
        # assigned; probe the existing instance session instead.
        r = self.session.get(self.URL_authentication)
        if r.json()['auth'] == False:
            print("Authentication required to start a new session")
            spot_username = raw_input("username:")
            spot_password = getpass.getpass()
            s = requests.Session()
            r = s.post(self.URL_authentication,
                       {"username": spot_username, "password": spot_password})
            self.session = s
        r = self.session.get(self.URL_authentication)
        return r.json()

    # =========================================================================
    # Close session
    # *** DOES NOT WORK ***
    def close(self):
        """Close the underlying HTTP session, then report auth status."""
        self.session.close()
        r = self.session.get(self.URL_authentication)
        return r.json()

    # =========================================================================
    # Search Datasets
    def search(self,
               query,
               limitnum=10,               # number of results to show
               skipnum=0,                 # number of results to skip
               sortterm="fs.stage_date",  # database field on which to sort
               sorttype="desc"):          # sort order: desc or asc
        """Search SPOT datasets; returns a list of JSON result objects.

        GET https://portal-auth.nersc.gov/als/hdf/search
        """
        self.URL_search = "https://portal-auth.nersc.gov/als/hdf/search"
        # BUG FIX: original passed the undefined name 'search' as the
        # search term; the 'query' argument is what must be sent.
        self.PARAMS_search = {"limitnum": limitnum, "skipnum": skipnum,
                              "sortterm": sortterm, "sorttype": sorttype,
                              "search": query}
        r = self.session.get(url=self.URL_search, params=self.PARAMS_search)
        return r.json()  # list of JSON objects containing search results

    # =========================================================================
    # Find Derived Datasets (norm, sino, gridrec, imgrec) from raw dataset
    def derived_datasets(self, dataset):
        """List datasets derived from a raw dataset (JSON list).

        GET https://portal-auth.nersc.gov/als/hdf/dataset?dataset=...
        """
        self.URL_DerivedDatasets = "https://portal-auth.nersc.gov/als/hdf/dataset"
        self.PARAMS_DerivedDatasets = {"dataset": dataset}
        r = self.session.get(self.URL_DerivedDatasets,
                             params=self.PARAMS_DerivedDatasets)
        return r.json()

    # =========================================================================
    # Takes dataset name and formats for use in API calls
    def formatPath(self, dataset, username='default'):
        """Split a dataset spec into (filename, username) for URL building.

        A 'user/name.h5' spec yields its two components; otherwise the
        stored SPOT login name is used when username is default/None.
        """
        # BUG FIX: str.strip(".h5") removes any of the characters
        # '.', 'h', '5' from both ends; only drop a real extension.
        if dataset.endswith(".h5"):
            dataset = dataset[:-3]
        if (username == 'default' or username == None):
            username = self.spot_username
            filename = dataset
        elif "/" in dataset:  # determine if dataset contains username and split
            # 'username/filename' becomes two separate strings
            datasplit = dataset.split("/")
            username = datasplit[-2]
            filename = datasplit[-1]
        else:
            filename = dataset
        return filename, username

    # =========================================================================
    # View Attributes for Single Dataset and Image Within Dataset
    def attributes(self, dataset, username=None):
        """Fetch overall HDF5 attributes (group '/') for a dataset.

        GET https://portal-auth.nersc.gov/als/hdf/attributes/[dataset]
        """
        self.URL_attributes = "https://portal-auth.nersc.gov/als/hdf/attributes/als/bl832/"
        dataset, username = self.formatPath(dataset, username=username)
        URLstring = self.URL_attributes + username + "/" + dataset + "/raw/" + dataset + ".h5"
        r = self.session.get(url=URLstring, params={"group": "/"})
        return r.json()

    # =========================================================================
    # List Images Within Dataset
    # *** NOT TESTED ***
    def list_images(self, dataset):
        """List image paths inside a dataset's HDF5 file (JSON list).

        GET https://portal-auth.nersc.gov/als/hdf/listimages/[dataset]
        """
        if dataset.endswith(".h5"):  # avoid the str.strip(".h5") pitfall
            dataset = dataset[:-3]
        self.URL_listImages = "https://portal-auth.nersc.gov/als/hdf/listimages/als/bl832/"
        # BUG FIX: removed stray bare 'URL' expression (NameError).
        r = self.session.get(self.URL_listImages + dataset)
        return r.json()

    # =========================================================================
    # Stage Dataset From Tape to Disk if Required
    # ... Not sure how to figure out when staging is complete.
    def stage(self, dataset, username='default'):
        """Ask SPOT to stage a dataset from tape to disk if needed.

        GET https://portal-auth.nersc.gov/als/hdf/stageifneeded/[dataset]
        Returns the raw requests response object.
        """
        fileName, username = self.formatPath(dataset, username)
        self.URL_stage = "https://portal-auth.nersc.gov/als/hdf/stageifneeded/als/bl832/"
        URL_string = self.URL_stage + username + "/" + fileName + "/raw/" + fileName + ".h5"
        r = self.session.get(url=URL_string)
        return r

    # =========================================================================
    # Download Dataset
    def download(self, dataset, username='default',
                 downloadPath='default', downloadName='default'):
        """Download a dataset's raw .h5 file; returns the local file path.

        GET https://portal-auth.nersc.gov/als/hdf/download/[dataset]
        downloadPath defaults to './'; downloadName defaults to the
        dataset name.
        """
        filename, username = self.formatPath(dataset, username=username)  # process input path
        # BUG FIX: str.strip('.h5') removes any of '.', 'h', '5' from
        # both ends of the name; only drop a genuine '.h5' extension.
        if downloadName.endswith('.h5'):
            downloadName = downloadName[:-3]
        self.URL_download = "https://portal-auth.nersc.gov/als/hdf/download/als/bl832/"
        if downloadPath == 'default':
            downloadPath = "./"
        if downloadName == 'default':
            downloadName = filename
        if downloadPath[-1] != "/":  # add "/" to output path if missing
            downloadPath = downloadPath + "/"
        URL_string = self.URL_download + username + "/" + filename + "/raw/" + filename + ".h5"
        r = self.session.get(URL_string)
        fileLocation = downloadPath + downloadName + ".h5"
        with open(fileLocation, "wb") as location:
            location.write(r.content)
        print('download complete: ' + fileLocation)
        return fileLocation

    # =========================================================================
    # Download Rawdata For Individual Image
    # *** NOT TESTED ***
    def download_image(self, dataset, username=None, number=0):
        """Download raw data for one image of a dataset (JSON).

        GET https://portal-auth.nersc.gov/als/hdf/rawdata/[dataset]
        number: image index, zero-padded to four digits.
        """
        if (type(number) == int or type(number) == float):
            numstring = "_" + str(int(number)).zfill(4)
        # BUG FIX: original called nonexistent 'deconstruct_DatasetPath'
        # and read unqualified 'URL_download'.
        filename, username = self.formatPath(dataset, username=username)
        self.URL_download = "https://portal-auth.nersc.gov/als/hdf/download/als/bl832/"
        URL_string = self.URL_download + username + "/" + filename + "-" + filename + ".h5"  # NOT SURE WHAT TO DO HERE
        r = self.session.get(URL_string)
        return r.json()

    # =========================================================================
    # Get Download URLs for .tif and .png files for individual image
    # *** Not written / NOT TESTED ***
    # GET https://portal-auth.nersc.gov/als/hdf/image/[dataset]?group=<path in HDF5 file>
    # =========================================================================
    # Run TomoPy on an existing dataset
    # *** Not written / NOT TESTED ***
    # GET https://portal-auth.nersc.gov/als/hdf/tomopyjob?dataset=<raw dataset>
# =============================================================================
#
"""
Functions for NERSC
"""
# =============================================================================
def list_from_txt(TextFilePath='../filepath/UnformatedFileList_Test.txt', comment='#'):
    """
    Convert an unformatted .txt file of newline-separated file names
    into a python list, skipping blank lines and lines starting with
    the comment character.
    """
    if '.py' in TextFilePath:  # if file path is actually a python script, run script instead
        execfile(TextFilePath)
    else:
        fileList = []
        # BUG FIX: the file handle was never closed, and blank lines
        # raised IndexError on data[i][0]; use a with-block and skip
        # empty lines after stripping whitespace.
        with open(TextFilePath, 'r') as textFile:
            for line in textFile:
                line = line.strip()
                if line and line[0] != comment:
                    fileList.append(line)  # keep lines that are not commented
        return(fileList)
# =============================================================================
def list_h5_files(searchDir):
    """
    Return every .h5 file in the search directory (non-recursive).
    """
    # Normalise the directory so the glob pattern always has a separator.
    if not searchDir.endswith('/'):
        searchDir += "/"
    return(glob.glob(searchDir + "*.h5"))
# =============================================================================
# Default warehouse location and account for beamline 8.3.2 data at NERSC.
NERSC_DefaultPath = "/global/project/projectdirs/als/spade/warehouse/als/bl832/"
userDefault = "hsbarnard"

def NERSC_ArchivePath(filename, useraccount=userDefault, archivepath=NERSC_DefaultPath):
    '''
    Generates path to raw tomography projection data in NERSC Archives
    Input list of file names, returns list of NERSC paths
    '''
    # Accept either a single file name or a list of names.
    names = filename if type(filename) == list else [filename]
    pathOut = [archivepath + useraccount + "/" + name + "/raw/" for name in names]
    print(pathOut)
    return pathOut
# =============================================================================
def NERSC_StageData(filename, username='default'):
    '''
    Authenticate against spot.nersc and request staging (tape -> disk)
    for each dataset in *filename*.  Equivalent of:
    > curl -k -c cookies.txt -X POST -d "username=[u]&password=[p]" https://portal-auth.nersc.gov/als/auth
    for more info: http://spot.nersc.gov/api.php
    '''
    # Prompt the user for SPOT credentials.
    URL_authentication = "https://portal-auth.nersc.gov/als/auth"
    spot_username = raw_input("username:")
    spot_password = getpass.getpass()
    if username == 'default':  # if no additional username is given, the spot login is used in file paths
        username = spot_username
    s = requests.Session()
    s.post(URL_authentication, {"username": spot_username, "password": spot_password})
    # Convert filename to list type if only one file name is given
    if type(filename) != list:
        filename = [filename]
    baseURL = "https://portal-auth.nersc.gov/als/hdf/stageifneeded/als/bl832/"
    r_list = []
    for name in filename:
        # BUG FIX: 'useraccount' and 'command_string' were undefined
        # names; build and report the actual staging URL instead.
        stage_url = baseURL + username + "/" + name + "/raw/" + name + ".h5"
        r = s.get(stage_url)
        print(stage_url)
        r_list.append(r.json())
    return r_list  # list of JSON responses, one per dataset
# =============================================================================
def NERSC_RetreiveData(filename,
                       username,
                       destinationpath,
                       archivepath=NERSC_DefaultPath):
    '''
    Copy raw tomography projection data out of the NERSC Archives for a
    list of file names into *destinationpath*.
    '''
    # Convert filename to list type if only one file name is given
    if type(filename) != list:
        filename = [filename]
    # Generate source and destination paths.
    filePathIn = []
    filePathOut = []
    for name in filename:
        # BUG FIX: original referenced undefined 'useraccount'; the
        # parameter is named 'username'.
        print(archivepath, username, name)
        filePathIn.append(archivepath + username + "/" + name + "/raw/" + name + ".h5")
        filePathOut.append(destinationpath + name + ".h5")
    logging.info("file path list complete"); print(filePathIn)
    logging.info("destination path list complete"); print(filePathOut)
    # Copy files to destination.
    for src, dst in zip(filePathIn, filePathOut):
        logging.info("begin transfer: " + src)
        # NOTE(review): shell copy; paths containing spaces would need quoting
        os.system("cp " + src + " " + dst)
    logging.info("transfer complete: ")
# >>>>>>> origin/master -- unresolved merge-conflict marker left in the file (resolve before use)
| 30.676824
| 350
| 0.619511
| 3,898
| 33,223
| 5.166752
| 0.089277
| 0.025422
| 0.046177
| 0.061569
| 0.999404
| 0.999404
| 0.999404
| 0.999404
| 0.999404
| 0.999404
| 0
| 0.037369
| 0.183247
| 33,223
| 1,082
| 351
| 30.705176
| 0.704846
| 0.242513
| 0
| 0.965714
| 0
| 0
| 0.140279
| 0.012117
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.04
| 0.062857
| null | null | 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
390747172666158a8595e314eef5bdaaabd915b4
| 15,389
|
py
|
Python
|
purbeurre_project/apps/users/tests/functional/test_chrome.py
|
etiennody/purbeurre-v2
|
cee10b5ad3ccee6535f197070cd4ee80f2bad5d0
|
[
"MIT"
] | null | null | null |
purbeurre_project/apps/users/tests/functional/test_chrome.py
|
etiennody/purbeurre-v2
|
cee10b5ad3ccee6535f197070cd4ee80f2bad5d0
|
[
"MIT"
] | 3
|
2020-10-12T13:58:38.000Z
|
2020-11-12T01:02:14.000Z
|
purbeurre_project/apps/users/tests/functional/test_chrome.py
|
etiennody/purbeurre-v2
|
cee10b5ad3ccee6535f197070cd4ee80f2bad5d0
|
[
"MIT"
] | 1
|
2021-02-03T18:49:31.000Z
|
2021-02-03T18:49:31.000Z
|
"""Functional tests for users app
"""
import time
import unittest
from django.contrib.auth.models import User
from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
class RegisterSeleniumTest(LiveServerTestCase):
    """Register functional test with selenium
    Args:
        LiveServerTestCase ([type]): Do basically the same as TransactionTestCase but also launch a live HTTP server in a separate thread so that the tests use another testing framework, as Selenium, instead of the built-in dummy client.
    """

    def setUp(self):
        # Configure headless Chrome so the test can run without a display.
        chrome_options = Options()
        chrome_options.add_argument("--headless")
        chrome_options.add_argument("--disable-gpu")
        chrome_options.add_argument("--remote-debugging-port=9222")
        chrome_options.add_argument("--window-size=1920x1080")
        self.driver = webdriver.Chrome(chrome_options=chrome_options)

    def test_valid_live_register_page(self):
        """Validate data entries on the registration page"""
        # Open the live-server registration form and locate its fields.
        self.driver.get("%s%s" % (self.live_server_url, "/register/"))
        username = self.driver.find_element(By.ID, "id_username")
        first_name = self.driver.find_element(By.ID, "id_first_name")
        last_name = self.driver.find_element(By.ID, "id_last_name")
        email = self.driver.find_element(By.ID, "id_email")
        password1 = self.driver.find_element(By.ID, "id_password1")
        password2 = self.driver.find_element(By.ID, "id_password2")
        submit = self.driver.find_element(By.CLASS_NAME, "btn")
        # Fixed sleep plus implicit wait gives the page time to settle.
        time.sleep(5)
        self.driver.implicitly_wait(5)
        # Fill in a valid registration and submit it.
        username.send_keys("BobRobert")
        first_name.send_keys("Bob")
        last_name.send_keys("Robert")
        email.send_keys("bobrobert@test.com")
        password1.send_keys("fglZfYmr%?,9")
        password2.send_keys("fglZfYmr%?,9")
        submit.click()
        time.sleep(5)
        self.driver.implicitly_wait(5)
        # Normalise a trailing slash so the URL comparison is slash-insensitive.
        current_url = self.driver.current_url
        if (self.driver.current_url[len(self.driver.current_url) - 1]) == "/":
            current_url = self.driver.current_url[:-1]
        # A successful registration redirects to the login page and
        # persists the new user.
        self.assertEqual(current_url, "%s%s" % (self.live_server_url, "/login"))
        self.assertIn("Se connecter", self.driver.page_source)
        self.assertTrue(User.objects.filter(username="BobRobert").exists())

    def tearDown(self):
        # Close the browser window opened in setUp.
        self.driver.close()
class LoginSeleniumTest(LiveServerTestCase):
    """Login functional test with selenium
    Args:
        LiveServerTestCase ([type]): Do basically the same as TransactionTestCase but also launch a live HTTP server in a separate thread so that the tests use another testing framework, as Selenium, instead of the built-in dummy client.
    """

    def setUp(self):
        # Configure headless Chrome so the test can run without a display.
        chrome_options = Options()
        chrome_options.add_argument("--headless")
        chrome_options.add_argument("--disable-gpu")
        chrome_options.add_argument("--remote-debugging-port=9222")
        chrome_options.add_argument("--window-size=1920x1080")
        self.driver = webdriver.Chrome(chrome_options=chrome_options)
        # Seed one known account so the login form has a user to accept.
        user = User.objects.create(
            username="BobRobert",
            first_name="Bob",
            last_name="Robert",
            email="test_bob@test.com",
        )
        # set_password hashes the password; passing it to create() would not.
        user.set_password("fglZfYmr%?,9")
        user.save()
        super(LoginSeleniumTest, self).setUp()

    def test_valid_live_login_page(self):
        """Validate data entries on the login page"""
        # Open the live-server login form and locate its fields.
        self.driver.get("%s%s" % (self.live_server_url, "/login/"))
        username = self.driver.find_element(By.ID, "id_username")
        password = self.driver.find_element(By.ID, "id_password")
        submit = self.driver.find_element(By.ID, "submit-button")
        # Submit the credentials created in setUp.
        username.send_keys("BobRobert")
        password.send_keys("fglZfYmr%?,9")
        submit.send_keys(Keys.RETURN)
        # Fixed sleep plus implicit wait gives the redirect time to finish.
        time.sleep(5)
        self.driver.implicitly_wait(5)
        # Normalise a trailing slash so the URL comparison is slash-insensitive.
        current_url = self.driver.current_url
        if (self.driver.current_url[len(self.driver.current_url) - 1]) == "/":
            current_url = self.driver.current_url[:-1]
        # A successful login redirects to the site root (home page).
        self.assertEqual(current_url, "%s" % (self.live_server_url))
        self.assertIn("Accueil :: Purbeurre", self.driver.title)

    def tearDown(self):
        # Close the browser window, then run Django's live-server teardown.
        self.driver.close()
        super(LoginSeleniumTest, self).tearDown()
class ChangePasswordSeleniumTest(LiveServerTestCase):
"""Change password in functional tests with selenium
Args:
LiveServerTestCase ([type]): Do basically the same as TransactionTestCase but also launch a live HTTP server in a separate thread so that the tests use another testing framework, as Selenium, instead of the built-in dummy client.
"""
def setUp(self):
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-gpu")
chrome_options.add_argument("--remote-debugging-port=9222")
chrome_options.add_argument("--window-size=1920x1080")
self.driver = webdriver.Chrome(chrome_options=chrome_options)
user = User.objects.create(
username="BobRobert",
first_name="Bob",
last_name="Robert",
email="test_bob@test.com",
)
user.set_password("fglZfYmr%?,9")
user.save()
super(ChangePasswordSeleniumTest, self).setUp()
def test_valid_live_change_password_page(self):
"""Validate data entries on the change password page"""
self.driver.get("%s%s" % (self.live_server_url, "/login/"))
username = self.driver.find_element(By.ID, "id_username")
password = self.driver.find_element(By.ID, "id_password")
submit = self.driver.find_element(By.ID, "submit-button")
username.send_keys("BobRobert")
password.send_keys("fglZfYmr%?,9")
submit.send_keys(Keys.RETURN)
time.sleep(5)
self.driver.implicitly_wait(5)
self.driver.get("%s%s" % (self.live_server_url, "/password/"))
old_password = self.driver.find_element(By.ID, "id_old_password")
new_password1 = self.driver.find_element(By.ID, "id_new_password1")
new_password2 = self.driver.find_element(By.ID, "id_new_password2")
submit = self.driver.find_element(By.ID, "submit-button")
old_password.send_keys("fglZfYmr%?,9")
new_password1.send_keys("%h2KtHFJ_%JY")
new_password2.send_keys("%h2KtHFJ_%JY")
submit.send_keys(Keys.RETURN)
time.sleep(5)
self.driver.implicitly_wait(5)
current_url = self.driver.current_url
if (self.driver.current_url[len(self.driver.current_url) - 1]) == "/":
current_url = self.driver.current_url[:-1]
self.assertEqual(
current_url, "%s%s" % (self.live_server_url, "/password_success")
)
self.assertIn("Mot de passe modifié :: Purbeurre", self.driver.title)
self.assertIn(
"Votre mot de passe a bien été modifié avec succès !",
self.driver.page_source,
)
def test_invalid_live_change_password_with_personal_information(self):
"""Unvalidate data entries on the change password page with personal information"""
self.driver.get("%s%s" % (self.live_server_url, "/login/"))
username = self.driver.find_element(By.ID, "id_username")
password = self.driver.find_element(By.ID, "id_password")
submit = self.driver.find_element(By.ID, "submit-button")
username.send_keys("BobRobert")
password.send_keys("fglZfYmr%?,9")
submit.send_keys(Keys.RETURN)
time.sleep(5)
self.driver.implicitly_wait(5)
self.driver.get("%s%s" % (self.live_server_url, "/password/"))
old_password = self.driver.find_element(By.ID, "id_old_password")
new_password1 = self.driver.find_element(By.ID, "id_new_password1")
new_password2 = self.driver.find_element(By.ID, "id_new_password2")
submit = self.driver.find_element(By.ID, "submit-button")
old_password.send_keys("fglZfYmr%?,9")
new_password1.send_keys("BobRobert")
new_password2.send_keys("BobRobert")
submit.send_keys(Keys.RETURN)
time.sleep(5)
self.driver.implicitly_wait(5)
current_url = self.driver.current_url
if (self.driver.current_url[len(self.driver.current_url) - 1]) == "/":
current_url = self.driver.current_url[:-1]
self.assertEqual(current_url, "%s%s" % (self.live_server_url, "/password"))
self.assertIn(
"Le mot de passe est trop semblable au champ « nom d’utilisateur ».",
self.driver.page_source,
)
def test_invalid_live_change_password_with_only_number(self):
    """Unvalidate data entries on the change password page with only number"""
    # Log in, since the password-change view is behind authentication.
    self.driver.get("%s%s" % (self.live_server_url, "/login/"))
    login_user = self.driver.find_element(By.ID, "id_username")
    login_pass = self.driver.find_element(By.ID, "id_password")
    login_submit = self.driver.find_element(By.ID, "submit-button")
    login_user.send_keys("BobRobert")
    login_pass.send_keys("fglZfYmr%?,9")
    login_submit.send_keys(Keys.RETURN)
    time.sleep(5)
    self.driver.implicitly_wait(5)
    # Submit a fully numeric password, which the validator must reject.
    self.driver.get("%s%s" % (self.live_server_url, "/password/"))
    old_field = self.driver.find_element(By.ID, "id_old_password")
    new_field1 = self.driver.find_element(By.ID, "id_new_password1")
    new_field2 = self.driver.find_element(By.ID, "id_new_password2")
    form_submit = self.driver.find_element(By.ID, "submit-button")
    old_field.send_keys("fglZfYmr%?,9")
    new_field1.send_keys("12345678")
    new_field2.send_keys("12345678")
    form_submit.send_keys(Keys.RETURN)
    time.sleep(5)
    self.driver.implicitly_wait(5)
    # Still on the form page, with the numeric-password error shown.
    current_url = self.driver.current_url
    if current_url.endswith("/"):
        current_url = current_url[:-1]
    self.assertEqual(current_url, "%s%s" % (self.live_server_url, "/password"))
    self.assertIn(
        "Ce mot de passe est entièrement numérique.", self.driver.page_source
    )
def test_invalid_live_change_password_with_short_entries(self):
    """Unvalidate data entries on the change password page with short entries"""
    # Log in first; the change-password page is only reachable when authenticated.
    self.driver.get("%s%s" % (self.live_server_url, "/login/"))
    login_user = self.driver.find_element(By.ID, "id_username")
    login_pass = self.driver.find_element(By.ID, "id_password")
    login_submit = self.driver.find_element(By.ID, "submit-button")
    login_user.send_keys("BobRobert")
    login_pass.send_keys("fglZfYmr%?,9")
    login_submit.send_keys(Keys.RETURN)
    time.sleep(5)
    self.driver.implicitly_wait(5)
    # Submit a 3-character password, below the 8-character minimum.
    self.driver.get("%s%s" % (self.live_server_url, "/password/"))
    old_field = self.driver.find_element(By.ID, "id_old_password")
    new_field1 = self.driver.find_element(By.ID, "id_new_password1")
    new_field2 = self.driver.find_element(By.ID, "id_new_password2")
    form_submit = self.driver.find_element(By.ID, "submit-button")
    old_field.send_keys("fglZfYmr%?,9")
    new_field1.send_keys("Q=3")
    new_field2.send_keys("Q=3")
    form_submit.send_keys(Keys.RETURN)
    time.sleep(5)
    self.driver.implicitly_wait(5)
    # Expect to remain on the form with the minimum-length error displayed.
    current_url = self.driver.current_url
    if current_url.endswith("/"):
        current_url = current_url[:-1]
    self.assertEqual(current_url, "%s%s" % (self.live_server_url, "/password"))
    self.assertIn(
        "Ce mot de passe est trop court. Il doit contenir au minimum 8 caractères.",
        self.driver.page_source,
    )
def test_invalid_live_change_password_with_differents_new_passwords(self):
    """Unvalidate data entries on the change password page with two different new passwords"""
    # NOTE: the original docstring said "with short entries" — copy-paste error;
    # this test submits two *mismatched* new passwords.
    # Log in first; the change-password page requires authentication.
    self.driver.get("%s%s" % (self.live_server_url, "/login/"))
    username = self.driver.find_element(By.ID, "id_username")
    password = self.driver.find_element(By.ID, "id_password")
    submit = self.driver.find_element(By.ID, "submit-button")
    username.send_keys("BobRobert")
    password.send_keys("fglZfYmr%?,9")
    submit.send_keys(Keys.RETURN)
    time.sleep(5)
    self.driver.implicitly_wait(5)
    # Submit two different values in the new-password fields.
    self.driver.get("%s%s" % (self.live_server_url, "/password/"))
    old_password = self.driver.find_element(By.ID, "id_old_password")
    new_password1 = self.driver.find_element(By.ID, "id_new_password1")
    new_password2 = self.driver.find_element(By.ID, "id_new_password2")
    submit = self.driver.find_element(By.ID, "submit-button")
    old_password.send_keys("fglZfYmr%?,9")
    new_password1.send_keys("tbf:[D=5")
    new_password2.send_keys("kOx`Y{nM")
    submit.send_keys(Keys.RETURN)
    time.sleep(5)
    self.driver.implicitly_wait(5)
    # Expect to stay on the form with the mismatch error message.
    current_url = self.driver.current_url
    if current_url.endswith("/"):
        current_url = current_url[:-1]
    self.assertEqual(current_url, "%s%s" % (self.live_server_url, "/password"))
    self.assertIn(
        "Les deux mots de passe ne correspondent pas.",
        self.driver.page_source,
    )
def test_invalid_live_change_password_with_wrong_old_password(self):
    """Unvalidate data entries on the change password page with a wrong old password"""
    # NOTE: the original docstring said "with short entries" — copy-paste error;
    # this test submits an incorrect *old* password (one character truncated).
    # Log in first; the change-password page requires authentication.
    self.driver.get("%s%s" % (self.live_server_url, "/login/"))
    username = self.driver.find_element(By.ID, "id_username")
    password = self.driver.find_element(By.ID, "id_password")
    submit = self.driver.find_element(By.ID, "submit-button")
    username.send_keys("BobRobert")
    password.send_keys("fglZfYmr%?,9")
    submit.send_keys(Keys.RETURN)
    time.sleep(5)
    self.driver.implicitly_wait(5)
    # Submit the change form with a wrong current password.
    self.driver.get("%s%s" % (self.live_server_url, "/password/"))
    old_password = self.driver.find_element(By.ID, "id_old_password")
    new_password1 = self.driver.find_element(By.ID, "id_new_password1")
    new_password2 = self.driver.find_element(By.ID, "id_new_password2")
    submit = self.driver.find_element(By.ID, "submit-button")
    old_password.send_keys("fglZfYmr%?,")
    new_password1.send_keys("kOx`Y{nM")
    new_password2.send_keys("kOx`Y{nM")
    submit.send_keys(Keys.RETURN)
    time.sleep(5)
    self.driver.implicitly_wait(5)
    # Expect to stay on the form with the incorrect-old-password error.
    current_url = self.driver.current_url
    if current_url.endswith("/"):
        current_url = current_url[:-1]
    self.assertEqual(current_url, "%s%s" % (self.live_server_url, "/password"))
    self.assertIn(
        "Votre ancien mot de passe est incorrect. Veuillez le rectifier.",
        self.driver.page_source,
    )
def tearDown(self):
    # Close the browser window before running the base-class teardown.
    self.driver.close()
    super().tearDown()
| 43.968571
| 237
| 0.659952
| 1,984
| 15,389
| 4.909274
| 0.099798
| 0.131417
| 0.074743
| 0.112115
| 0.843224
| 0.83347
| 0.825257
| 0.799589
| 0.782238
| 0.777823
| 0
| 0.013351
| 0.211515
| 15,389
| 349
| 238
| 44.094556
| 0.789187
| 0.08935
| 0
| 0.745455
| 0
| 0
| 0.148782
| 0.012506
| 0
| 0
| 0
| 0
| 0.065455
| 1
| 0.050909
| false
| 0.305455
| 0.029091
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
1a8e8646374f2d4b2012fcbe855f6179a7654b83
| 7,155
|
py
|
Python
|
gui/action/fileIOAction.py
|
swyang50066/medical-image-segmentation
|
bf53a19864bb31d8d4828b3bf42d9cf5f6bf361c
|
[
"MIT"
] | 1
|
2022-03-23T07:36:58.000Z
|
2022-03-23T07:36:58.000Z
|
gui/action/fileIOAction.py
|
swyang50066/medical-image-segmentation
|
bf53a19864bb31d8d4828b3bf42d9cf5f6bf361c
|
[
"MIT"
] | null | null | null |
gui/action/fileIOAction.py
|
swyang50066/medical-image-segmentation
|
bf53a19864bb31d8d4828b3bf42d9cf5f6bf361c
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from file import File
class FileIOAction(object):
    """File input/output actions for the segmentation GUI.

    Mixin-style class: the methods read and write many attributes
    (``canvas``, ``overviewTree``, ``scrollBar``, ``dicomTable``, ...)
    that are presumably provided by the concrete window class this is
    mixed into — TODO confirm against the class that inherits it.
    """

    def __init__(self):
        super().__init__()

    def openDICOM(self):
        ''' Open input file

        Asks the user for a DICOM path, loads its layers into the canvas,
        resets markers/labels/overview items, refreshes the four view
        panes, the scroll bar, the histogram, the DICOM attribute table
        and the embedded Python console.
        '''
        # Read file path (QFileDialog returns a (path, filter) tuple)
        path = QFileDialog.getOpenFileName()
        # No path given: user cancelled the dialog
        if not str(path[0]): return 0
        # Get image layers
        layers, attribute = File.importDICOM(str(path[0]))
        self.canvas.image = layers[0]
        self.canvas.seedOverlay = layers[1]
        self.canvas.segmentOverlay = layers[2]
        # Seed mask: one channel of the seed overlay's shape, zero-filled
        self.canvas.seed = np.zeros_like(layers[1])[..., 0]
        self.canvas.spacing = layers[3]
        # Reset label
        self.canvas.label = np.zeros_like(self.canvas.seed)
        # Reset markers (one list per marker shape)
        self.markers = {"dot": [],
                        "line": [],
                        "arc": [],
                        "box": [],
                        "curve": []}
        self.invisibleMarkerIndex = {"dot": [],
                                     "line": [],
                                     "arc": [],
                                     "box": [],
                                     "curve": []}
        self.markerHashMap = {}
        # Reset overview items: drop every "Marker" entry from the tree
        overviewTreeItems = self.overviewTree.findItems(
            "Marker",
            Qt.MatchContains |
            Qt.MatchRecursive)
        for overviewTreeItem in overviewTreeItems:
            self.annotationTreeItem.removeChild(overviewTreeItem)
        self.segmentTreeItem.takeChildren()
        self.segmentTreeItem.setFlags(Qt.ItemIsSelectable)
        self.CPRTreeItem.takeChildren()
        self.CPRTreeItem.setFlags(Qt.ItemIsSelectable)
        # Display image: rebuild the pixmap for each of the four views
        axialQPixmap = self.canvas.getQPixmap(
            self.canvas.getImageWithSeed())
        segmentQPixmap = self.canvas.getQPixmap(
            self.canvas.getImageWithSegment())
        coronalQPixmap = self.canvas.getQPixmap(
            self.canvas.getImageWithCoronal())
        sagittalQPixmap = self.canvas.getQPixmap(
            self.canvas.getImageWithSagittal())
        self.axialQLabel.setPixmap(axialQPixmap)
        self.segmentQLabel.setPixmap(segmentQPixmap)
        self.coronalQLabel.setPixmap(coronalQPixmap)
        self.sagittalQLabel.setPixmap(sagittalQPixmap)
        # Adjust scroll bar range to the number of slices
        self.scrollBar.setMaximum(len(self.canvas.image)-1)
        # Set histogram: reset display window to defaults before init
        self.canvas.gamma = 1
        self.canvas.valmin, self.canvas.valmax = 0, 255
        self.histogramView.initialize(self.canvas.image,
                                      self.histCanvas,
                                      self.histAxes)
        # Set DICOM attribute table (one row per attribute value)
        for k, (key, value) in enumerate(attribute.items()):
            tableItem = QTableWidgetItem(value)
            self.dicomTable.setItem(k, 0, tableItem)
        # Update python console variables
        variables = {"image": self.canvas.image}
        self.ipyConsole.pushVariables(variables)

    def openFile(self):
        ''' Open input file

        Same flow as ``openDICOM`` but for a plain image file: no spacing
        layer, no DICOM attribute table, and the label is derived from
        the image channels instead of starting empty.
        '''
        # Read file path
        path = QFileDialog.getOpenFileName()
        # No path given: user cancelled the dialog
        if not str(path[0]): return 0
        # Get image layers
        layers = File.importImage(str(path[0]))
        self.canvas.image = layers[0]
        self.canvas.seedOverlay = layers[1]
        self.canvas.segmentOverlay = layers[2]
        self.canvas.seed = np.zeros_like(layers[1])[..., 0]
        # Reset label: pixels where (G - B) == 255 are treated as labeled
        # — presumably a color-coded annotation convention; verify against
        # File.importImage's output format.
        self.canvas.label = np.uint8(self.canvas.image[..., 1]
                                     - self.canvas.image[..., 2] == 255)
        #np.zeros_like(self.canvas.seed)
        # Reset markers (one list per marker shape)
        self.markers = {"dot": [],
                        "line": [],
                        "arc": [],
                        "box": [],
                        "curve": []}
        self.invisibleMarkerIndex = {"dot": [],
                                     "line": [],
                                     "arc": [],
                                     "box": [],
                                     "curve": []}
        self.markerHashMap = {}
        # Reset overview items: drop every "Marker" entry from the tree
        overviewTreeItems = self.overviewTree.findItems(
            "Marker",
            Qt.MatchContains |
            Qt.MatchRecursive)
        for overviewTreeItem in overviewTreeItems:
            self.annotationTreeItem.removeChild(overviewTreeItem)
        self.segmentTreeItem.takeChildren()
        self.segmentTreeItem.setFlags(Qt.ItemIsSelectable)
        self.CPRTreeItem.takeChildren()
        self.CPRTreeItem.setFlags(Qt.ItemIsSelectable)
        # Display image: rebuild the pixmap for each of the four views
        axialQPixmap = self.canvas.getQPixmap(
            self.canvas.getImageWithSeed())
        segmentQPixmap = self.canvas.getQPixmap(
            self.canvas.getImageWithSegment())
        coronalQPixmap = self.canvas.getQPixmap(
            self.canvas.getImageWithCoronal())
        sagittalQPixmap = self.canvas.getQPixmap(
            self.canvas.getImageWithSagittal())
        self.axialQLabel.setPixmap(axialQPixmap)
        self.segmentQLabel.setPixmap(segmentQPixmap)
        self.coronalQLabel.setPixmap(coronalQPixmap)
        self.sagittalQLabel.setPixmap(sagittalQPixmap)
        # Adjust scroll bar range to the number of slices
        self.scrollBar.setMaximum(len(self.canvas.image)-1)
        # Set histogram: reset display window to defaults before init
        self.canvas.gamma = 1
        self.canvas.valmin, self.canvas.valmax = 0, 255
        self.histogramView.initialize(self.canvas.image,
                                      self.histCanvas,
                                      self.histAxes)
        # Update python console variables
        variables = {"image": self.canvas.image}
        self.ipyConsole.pushVariables(variables)

    def saveFile(self):
        ''' Save output file

        Asks for a target directory and exports the current image,
        label and seed via File.exportImage in BMP format.
        '''
        # Read output path
        path = QFileDialog.getExistingDirectory()
        # save output file
        File.exportImage(path,
                         self.canvas.image,
                         self.canvas.label,
                         self.canvas.colorSeed,
                         fformat='bmp')

    def saveSTL(self):
        ''' Save STL

        Asks for a target directory and exports the current label
        volume as an STL mesh via File.exportSTL.
        '''
        # Read output path
        path = QFileDialog.getExistingDirectory()
        # save output file
        File.exportSTL(path,
                       self.canvas.label)

    def closeWindow(self):
        ''' Close application window
        '''
        self.window.close()
| 35.775
| 72
| 0.500908
| 556
| 7,155
| 6.42446
| 0.255396
| 0.131579
| 0.046193
| 0.053751
| 0.832587
| 0.832587
| 0.818029
| 0.818029
| 0.818029
| 0.818029
| 0
| 0.009401
| 0.405311
| 7,155
| 199
| 73
| 35.954774
| 0.830082
| 0.085395
| 0
| 0.755906
| 0
| 0
| 0.01496
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047244
| false
| 0
| 0.062992
| 0
| 0.11811
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1aa4528375f8f7db2a58e1857e94b6391941d0cb
| 8,790
|
py
|
Python
|
project/dqn_all_games.py
|
aditya140/rlcard
|
de203b9b74a653019452aeb0622345f33dd42eda
|
[
"MIT"
] | null | null | null |
project/dqn_all_games.py
|
aditya140/rlcard
|
de203b9b74a653019452aeb0622345f33dd42eda
|
[
"MIT"
] | null | null | null |
project/dqn_all_games.py
|
aditya140/rlcard
|
de203b9b74a653019452aeb0622345f33dd42eda
|
[
"MIT"
] | null | null | null |
import torch
import os
import sys
sys.path.append(".")
import rlcard
from rlcard.agents import RandomAgent, DQN_agent, DQN_conf
from rlcard.utils import set_global_seed, tournament
from rlcard.utils import Logger
def train_blackjack():
    """Train a DQN agent against a random agent on Blackjack.

    Logs evaluation rewards during training, plots the learning curve,
    and saves the trained model to ``models/blackjack_dqn_pytorch/model.pth``.
    """
    # Make environment (fixed seed for reproducibility)
    env = rlcard.make("blackjack", config={"seed": 0})
    eval_env = rlcard.make("blackjack", config={"seed": 0})
    # Set the iterations numbers and how frequently we evaluate the performance
    evaluate_every = 100
    evaluate_num = 1000
    episode_num = 3000
    # The initial memory size of the replay buffer
    memory_init_size = 1000
    # The paths for saving the logs and learning curves
    log_dir = "./experiments/blackjack_results_dqn/"
    # Set a global seed
    set_global_seed(0)
    # DQN hyperparameters; training happens after every transition ("train_every": 1)
    params = {
        "scope": "DQN-Agent",
        "num_actions": env.action_num,
        "replay_memory_size": memory_init_size,
        "num_states": env.state_shape,
        "discount_factor": 0.99,
        "epsilon_start": 1.0,
        "epsilon_end": 0.1,
        "epsilon_decay_steps": 20000,
        "batch_size": 32,
        "train_every": 1,
        "mlp_layers": [128, 128],
        "lr": 0.0005,
    }
    agent_conf = DQN_conf(**params)
    agent = DQN_agent(agent_conf)
    random_agent = RandomAgent(action_num=eval_env.action_num)
    env.set_agents([agent, random_agent])
    eval_env.set_agents([agent, random_agent])
    logger = Logger(log_dir)
    for episode in range(episode_num):
        # Generate data from the environment
        trajectories, _ = env.run(is_training=True)
        # Feed transitions into agent memory, and train the agent
        for ts in trajectories[0]:
            agent.feed(ts)
        # Evaluate the performance. Play with random agents.
        if episode % evaluate_every == 0:
            logger.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0])
    # Close files in the logger
    logger.close_files()
    # Plot the learning curve
    logger.plot("DQN BLACKJACK")
    # Save model
    save_dir = "models/blackjack_dqn_pytorch"
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    state_dict = agent.get_state_dict()
    print(state_dict.keys())
    torch.save(state_dict, os.path.join(save_dir, "model.pth"))
def train_uno():
    """Train a DQN agent against a random agent on UNO.

    Logs evaluation rewards during training, plots the learning curve,
    and saves the trained model to ``models/uno_dqn_pytorch/model.pth``.
    """
    # Make environment (fixed seed for reproducibility)
    env = rlcard.make("uno", config={"seed": 0})
    eval_env = rlcard.make("uno", config={"seed": 0})
    # Set the iterations numbers and how frequently we evaluate the performance
    evaluate_every = 100
    evaluate_num = 1000
    episode_num = 3000
    # The initial memory size of the replay buffer
    memory_init_size = 1000
    # The paths for saving the logs and learning curves
    log_dir = "./experiments/uno_results_dqn/"
    # Set a global seed
    set_global_seed(0)
    # DQN hyperparameters; UNO's larger state space gets wider MLP layers
    params = {
        "scope": "DQN-Agent",
        "num_actions": env.action_num,
        "replay_memory_size": memory_init_size,
        "num_states": env.state_shape,
        "discount_factor": 0.99,
        "epsilon_start": 1.0,
        "epsilon_end": 0.1,
        "epsilon_decay_steps": 20000,
        "batch_size": 32,
        "train_every": 1,
        "mlp_layers": [512, 512],
        "lr": 0.0005,
    }
    agent_conf = DQN_conf(**params)
    agent = DQN_agent(agent_conf)
    random_agent = RandomAgent(action_num=eval_env.action_num)
    env.set_agents([agent, random_agent])
    eval_env.set_agents([agent, random_agent])
    logger = Logger(log_dir)
    for episode in range(episode_num):
        # Generate data from the environment
        trajectories, _ = env.run(is_training=True)
        # Feed transitions into agent memory, and train the agent
        for ts in trajectories[0]:
            agent.feed(ts)
        # Evaluate the performance. Play with random agents.
        if episode % evaluate_every == 0:
            logger.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0])
    # Close files in the logger
    logger.close_files()
    # Plot the learning curve
    logger.plot("DQN UNO")
    # Save model
    save_dir = "models/uno_dqn_pytorch"
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    state_dict = agent.get_state_dict()
    print(state_dict.keys())
    torch.save(state_dict, os.path.join(save_dir, "model.pth"))
def train_leduc_holdem_poker():
    """Train a DQN agent against a random agent on Leduc Hold'em poker.

    Logs evaluation rewards during training, plots the learning curve, and
    saves the trained model to ``models/leduc_holdem_dqn_pytorch/model.pth``.
    """
    # Make environment (fixed seed for reproducibility)
    env = rlcard.make("leduc-holdem", config={"seed": 0})
    eval_env = rlcard.make("leduc-holdem", config={"seed": 0})
    # Set the iterations numbers and how frequently we evaluate the performance
    evaluate_every = 100
    evaluate_num = 1000
    episode_num = 3000
    # The initial memory size of the replay buffer
    memory_init_size = 1000
    # The paths for saving the logs and learning curves
    log_dir = "./experiments/leduc_holdem_results_dqn/"
    # Set a global seed
    set_global_seed(0)
    # DQN hyperparameters; training happens after every transition ("train_every": 1)
    params = {
        "scope": "DQN-Agent",
        "num_actions": env.action_num,
        "replay_memory_size": memory_init_size,
        "num_states": env.state_shape,
        "discount_factor": 0.99,
        "epsilon_start": 1.0,
        "epsilon_end": 0.1,
        "epsilon_decay_steps": 20000,
        "batch_size": 32,
        "train_every": 1,
        "mlp_layers": [128, 128],
        "lr": 0.0005,
    }
    agent_conf = DQN_conf(**params)
    agent = DQN_agent(agent_conf)
    random_agent = RandomAgent(action_num=eval_env.action_num)
    env.set_agents([agent, random_agent])
    eval_env.set_agents([agent, random_agent])
    logger = Logger(log_dir)
    for episode in range(episode_num):
        # Generate data from the environment
        trajectories, _ = env.run(is_training=True)
        # Feed transitions into agent memory, and train the agent
        for ts in trajectories[0]:
            agent.feed(ts)
        # Evaluate the performance. Play with random agents.
        if episode % evaluate_every == 0:
            logger.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0])
    # Close files in the logger
    logger.close_files()
    # Plot the learning curve
    logger.plot("DQN Leduc holdem")
    # Save model
    save_dir = "models/leduc_holdem_dqn_pytorch"
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    state_dict = agent.get_state_dict()
    print(state_dict.keys())
    torch.save(state_dict, os.path.join(save_dir, "model.pth"))
def train_mahjong():
    """Train a DQN agent against three random agents on Mahjong.

    Mahjong is four-player, so the agent is seated with three random
    opponents. Logs evaluation rewards, plots the learning curve, and
    saves the trained model to ``models/mahjong_dqn_pytorch/model.pth``.
    """
    # Make environment (fixed seed for reproducibility)
    env = rlcard.make("mahjong", config={"seed": 0})
    eval_env = rlcard.make("mahjong", config={"seed": 0})
    # Set the iterations numbers and how frequently we evaluate the performance
    evaluate_every = 100
    evaluate_num = 1000
    episode_num = 3000
    # The initial memory size of the replay buffer
    memory_init_size = 1000
    # The paths for saving the logs and learning curves
    log_dir = "./experiments/mahjong_results_dqn/"
    # Set a global seed
    set_global_seed(0)
    # DQN hyperparameters; Mahjong's larger state space gets wider MLP layers
    params = {
        "scope": "DQN-Agent",
        "num_actions": env.action_num,
        "replay_memory_size": memory_init_size,
        "num_states": env.state_shape,
        "discount_factor": 0.99,
        "epsilon_start": 1.0,
        "epsilon_end": 0.1,
        "epsilon_decay_steps": 20000,
        "batch_size": 32,
        "train_every": 1,
        "mlp_layers": [512, 512],
        "lr": 0.0005,
    }
    agent_conf = DQN_conf(**params)
    agent = DQN_agent(agent_conf)
    random_agent = RandomAgent(action_num=eval_env.action_num)
    # Four seats: the learning agent plus three random opponents
    env.set_agents([agent, random_agent, random_agent, random_agent])
    eval_env.set_agents([agent, random_agent, random_agent, random_agent])
    logger = Logger(log_dir)
    for episode in range(episode_num):
        # Generate data from the environment
        trajectories, _ = env.run(is_training=True)
        # Feed transitions into agent memory, and train the agent
        for ts in trajectories[0]:
            agent.feed(ts)
        # Evaluate the performance. Play with random agents.
        if episode % evaluate_every == 0:
            logger.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0])
    # Close files in the logger
    logger.close_files()
    # Plot the learning curve
    logger.plot("DQN Mahjong")
    # Save model
    save_dir = "models/mahjong_dqn_pytorch"
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    state_dict = agent.get_state_dict()
    print(state_dict.keys())
    torch.save(state_dict, os.path.join(save_dir, "model.pth"))
if __name__ == "__main__":
    # Only the Mahjong run is enabled; uncomment a line to train another game.
    # train_uno()
    # train_blackjack()
    # train_leduc_holdem_poker()
    train_mahjong()
| 28.083067
| 87
| 0.650739
| 1,173
| 8,790
| 4.642796
| 0.108269
| 0.020566
| 0.035255
| 0.029379
| 0.93206
| 0.904884
| 0.904884
| 0.874587
| 0.863753
| 0.863753
| 0
| 0.030161
| 0.24562
| 8,790
| 312
| 88
| 28.173077
| 0.791133
| 0.195222
| 0
| 0.784946
| 0
| 0
| 0.143549
| 0.035033
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021505
| false
| 0
| 0.037634
| 0
| 0.05914
| 0.021505
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
46c7d0b372988e0fe0d4f07d705083852b232e7d
| 46
|
py
|
Python
|
rap/sweeps/data_management/__init__.py
|
mdaal/rap
|
b18f3ffb71688ccbd967bae153b058054870a88c
|
[
"MIT"
] | null | null | null |
rap/sweeps/data_management/__init__.py
|
mdaal/rap
|
b18f3ffb71688ccbd967bae153b058054870a88c
|
[
"MIT"
] | null | null | null |
rap/sweeps/data_management/__init__.py
|
mdaal/rap
|
b18f3ffb71688ccbd967bae153b058054870a88c
|
[
"MIT"
] | null | null | null |
# Import-time trace: announces that this package level has been initialised.
print("rap.sweeps.data_management level init")
| 46
| 46
| 0.826087
| 7
| 46
| 5.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 46
| 1
| 46
| 46
| 0.840909
| 0
| 0
| 0
| 0
| 0
| 0.787234
| 0.553191
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
46fadde31872e402057d8780216304033e31b735
| 164
|
py
|
Python
|
g2base/remoteObjects/ro_pubsub.py
|
naojsoft/g2cam
|
4f01cdccae7978d5c16af59a90ff7459ed6c2997
|
[
"BSD-3-Clause"
] | null | null | null |
g2base/remoteObjects/ro_pubsub.py
|
naojsoft/g2cam
|
4f01cdccae7978d5c16af59a90ff7459ed6c2997
|
[
"BSD-3-Clause"
] | null | null | null |
g2base/remoteObjects/ro_pubsub.py
|
naojsoft/g2cam
|
4f01cdccae7978d5c16af59a90ff7459ed6c2997
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Choose internal version of pubsub
#
from .pubsubs.pubsub_redis import PubSub
#from .pubsubs.pubsub_redis import PubSub
#from .pubsubs.pubsub_zmq import PubSub
| 20.5
| 41
| 0.804878
| 23
| 164
| 5.608696
| 0.434783
| 0.232558
| 0.395349
| 0.534884
| 0.705426
| 0.705426
| 0.705426
| 0.705426
| 0.705426
| 0.705426
| 0
| 0
| 0.128049
| 164
| 7
| 42
| 23.428571
| 0.902098
| 0.676829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 13
|
2045a25e83c213b504148ccf226118ae86429ab0
| 28,389
|
py
|
Python
|
Boids.py
|
Dradoue/Boids
|
d7b79e49243c4a6fd437285b58ef6c0899e910d2
|
[
"MIT"
] | 2
|
2021-04-06T14:41:27.000Z
|
2021-08-09T06:11:49.000Z
|
Boids.py
|
Dradoue/Boids
|
d7b79e49243c4a6fd437285b58ef6c0899e910d2
|
[
"MIT"
] | null | null | null |
Boids.py
|
Dradoue/Boids
|
d7b79e49243c4a6fd437285b58ef6c0899e910d2
|
[
"MIT"
] | null | null | null |
import time
# import cupy as cp
import numpy as np
from Physics2D import Physics2D
from constants import WINDOW_SIZE, \
BOID_VIEW_ANGLE, BOID_VIEW, \
EPSILON, LEAF_SIZE, \
SEPARATION_DIST, \
SEPARATION_FORCE, \
COHESION_FORCE, ALIGNMENT_FORCE, \
CHASING_FORCE, FLEE_FORCE, \
RELATIONS_SPECIES
# imports from local files
from utils import angle_between
class Boids(Physics2D):
    """
    Boids class, inherited from Physics2D, define the behavior of boids

    :param num_entities: number of boids for each species, (ex: [12,13] mean 12 boids for species 1 and
    13 for species 2)
    :param positions: initial positions of boids, 2D array of shape (n_boids, 2)
    :param velocities: initial velocities of boids, 2D array of shape (n_boids, 2)
    :param min_speed: min speed (l2-norm) of boids
    :param max_speed: max speed (l2-norm) of boids
    :param max_turn: max angle change a boid can have on one step, pi/48 by default.
    """

    def __init__(self, num_entities, positions, velocities,
                 min_speed, max_speed, max_turn, dt=1):
        super().__init__(positions, velocities,
                         min_speed, max_speed, max_turn, dt)
        self.positions = positions
        self.list_number_boids = num_entities
        # Spatial grid: one cell (list of boid indices) per
        # LEAF_SIZE x LEAF_SIZE square of the window.
        self.grid = \
            [[[] for _ in range(int((WINDOW_SIZE[0] / LEAF_SIZE)))]
             for _ in range(int(WINDOW_SIZE[1] / LEAF_SIZE))]
        # Per-boid steering force accumulated by the rules each step.
        self.steering = np.zeros(shape=(self.nb_entities, 2))

    def update_boids(self):
        """
        calculate acceleration for each boid with the rules.
        """
        self.update_grid()
        self.apply_sca()
        self.update(self.steering)

    def apply_sca(self):
        """
        apply separation, cohesion, alignment, chase and flee rules
        """
        # Module-level timer kept (the original declared it global); only the
        # per-step print below reads it here.
        global timer1
        timer1 = time.time()
        self.steering = np.zeros(shape=(self.nb_entities, 2))
        for i in np.arange(int(WINDOW_SIZE[0] / LEAF_SIZE)):
            for j in np.arange(int(WINDOW_SIZE[1] / LEAF_SIZE)):
                # Running start index of the current species' id range
                # (renamed from "sum", which shadowed the builtin).
                offset = 0
                grid = self.grid[i][j]
                list_species_indices = list()
                # for each species we apply CSA.
                n_species = 0
                for num_boids in self.list_number_boids:
                    boids_indices = np.arange(offset, offset + num_boids)
                    offset += num_boids
                    # Keep only the members of this species present in cell (i, j).
                    list_ind = list()
                    for ind in boids_indices:
                        if ind in grid:
                            list_ind.append(ind)
                    list_species_indices.append(list_ind)
                    self.apply_cohesion(list_ind, n_species)
                    self.apply_alignment(list_ind, n_species)
                    self.apply_separation(list_ind, n_species)
                    n_species += 1
                # apply chasing and fleeing relations between species pairs
                for k in range(RELATIONS_SPECIES.shape[0]):
                    for m in range(RELATIONS_SPECIES.shape[0]):
                        # species k chases species m
                        if RELATIONS_SPECIES[k, m] == 1:
                            self.apply_chasing(list_species_indices[k], list_species_indices[m], k)
                        # species k flees species m
                        if RELATIONS_SPECIES[k, m] == -1:
                            self.apply_fleeing(list_species_indices[k], list_species_indices[m], k)
        timer2 = time.time()
        print("time for one timestep:", timer2 - timer1)

    def update_grid(self):
        """
        update the grid
        """
        # Rebuild empty cells, then bin every boid by its floored cell coords.
        self.grid = [[[] for _ in np.arange(int((WINDOW_SIZE[0] / LEAF_SIZE)))]
                     for _ in np.arange(int(WINDOW_SIZE[1] / LEAF_SIZE))]
        indices_ = np.concatenate((np.array(np.floor(self.positions[:, 0] / LEAF_SIZE), dtype=int)[:, np.newaxis],
                                   np.array(np.floor(self.positions[:, 1] / LEAF_SIZE), dtype=int)[:, np.newaxis],
                                   np.arange(self.nb_entities, dtype=int)[:, np.newaxis]), axis=1)
        for i, j, k in indices_:
            self.grid[i][j].append(k)

    def apply_cohesion(self, indices, n_specie):
        """
        apply cohesion rule to boids which have indices :indices: for species :n_specie:

        :param indices: indices to apply the rule
        :n_specie: the species, an integer between 0 and num_species (used for parameters)
        """
        if len(indices) <= 1:
            return
        for ind in indices:
            indices_ = list(indices)
            indices_.remove(ind)
            indices_ = np.array(indices_)
            dist = np.linalg.norm(self.positions[indices_, :]
                                  - self.positions[ind, :], axis=1)
            # neighbors indices relatively to indices_
            neighbors = np.where(dist < BOID_VIEW[n_specie])[0]
            if neighbors.shape[0] > 0:
                # true_neighbors: true indices for positions and velocities
                # neighbors: indices of neighbors relatively to indices_
                true_neighbors = indices_[neighbors]
                diff = self.positions[true_neighbors, :] \
                    - self.positions[ind, :]
                with np.errstate(invalid='ignore'):
                    respect_angles = \
                        np.where(angle_between(self.velocities[ind, :], diff)
                                 <= BOID_VIEW_ANGLE[n_specie] / 2)[0]
                # respect_angles: indices of neighbors
                # relatively to true_neighbors
                final_true_neighbors = true_neighbors[respect_angles]
                if final_true_neighbors.shape[0] > 0:
                    # Steer toward the mean position of visible neighbors.
                    cohesion = np.mean(self.positions[final_true_neighbors, :],
                                       axis=0) - self.positions[ind, :]
                    norm_cohesion = np.linalg.norm(cohesion)
                    if norm_cohesion > EPSILON:
                        self.steering[ind, :] += COHESION_FORCE[n_specie] * \
                            (cohesion / norm_cohesion)

    def apply_separation(self, indices, n_specie):
        """
        apply separation rule to boids which have indices :indices: for species number :n_specie:

        :param indices: indices of boids to apply the rule
        :n_specie: the species, an integer between 0 and num_species (used for parameters)
        """
        if len(indices) <= 1:
            return
        for ind in indices:
            indices_ = list(indices)
            indices_.remove(ind)
            indices_ = np.array(indices_)
            # get the distances
            # l2 norms.
            dist_norm = np.linalg.norm(self.positions[indices_, :]
                                       - self.positions[ind, :], axis=1)
            dist = self.positions[indices_, :] \
                - self.positions[ind, :]
            neighbors = np.where(dist_norm <= SEPARATION_DIST[n_specie])[0]
            if neighbors.shape[-1] != 0:
                respect_angles = \
                    np.where(angle_between(self.velocities[ind, :], dist[neighbors])
                             <= BOID_VIEW_ANGLE[n_specie] / 2)[0]
                # neighbors that respect angles respectively to indices_
                neighbors_that_respect_angle = neighbors[respect_angles]
                if neighbors_that_respect_angle.shape[0] > 0:
                    # Steer away from the summed offsets to close-by neighbors.
                    sep = np.sum(dist[neighbors_that_respect_angle, :], axis=0)
                    self.steering[ind, :] -= SEPARATION_FORCE[n_specie] * sep
                    # sep/dist_norm?

    def apply_alignment(self, indices, n_specie):
        """
        apply alignment rule to boids which have indices :indices: for species number :n_specie:
        alignment -> boids tends to move like their neighbors

        :param indices: indices of boids to apply the rule
        :n_specie: the species, an integer between 0 and num_species (used for parameters)
        """
        if len(indices) <= 1:
            return
        for ind in indices:
            indices_ = list(indices)
            indices_.remove(ind)
            indices_ = np.array(indices_)
            dist = np.linalg.norm(self.positions[indices_, :]
                                  - self.positions[ind, :], axis=1)
            neighbors = np.where(dist < BOID_VIEW[n_specie])[0]
            # neighbors relatively to indices_
            if neighbors.shape[-1] != 0:
                true_indices_neighbors = indices_[neighbors]
                diff = self.positions[true_indices_neighbors, :] \
                    - self.positions[ind, :]
                respect_angles = \
                    np.where(angle_between(self.velocities[ind, :], diff)
                             <= BOID_VIEW_ANGLE[n_specie] / 2)[0]
                # neighbors relatively to true_indices_neighbors
                final_true_neighbors = true_indices_neighbors[respect_angles]
                if final_true_neighbors.shape[0] > 0:
                    # Steer toward the mean heading of visible neighbors.
                    mean_vel = np.mean(self.velocities[final_true_neighbors, :], axis=0)
                    norm_vel = np.linalg.norm(mean_vel)
                    if norm_vel > EPSILON:
                        self.steering[ind, :] += ALIGNMENT_FORCE[n_specie] * \
                            (mean_vel / norm_vel)

    def apply_chasing(self, indices_chase, indices_chased, n_specie):
        """
        apply chasing rule: boids :indices_chase: of species :n_specie: steer
        toward the visible boids :indices_chased: (cohesion toward prey).

        :param indices_chase: indices of boids doing the chasing
        :param indices_chased: indices of boids being chased
        :n_specie: the chasing species, an integer between 0 and num_species (used for parameters)
        """
        if len(indices_chase) == 0 or len(indices_chased) == 0:
            return
        # we take each that are chasing
        for ind in indices_chase:
            indices_ = list(indices_chased)
            indices_ = np.array(indices_)
            dist = np.linalg.norm(self.positions[indices_, :]
                                  - self.positions[ind, :], axis=1)
            # neighbors indices relatively to indices_
            neighbors = np.where(dist < BOID_VIEW[n_specie])[0]
            if neighbors.shape[0] > 0:
                # true_neighbors: true indices for positions and velocities
                # neighbors: indices of neighbors relatively to indices_
                true_neighbors = indices_[neighbors]
                diff = self.positions[true_neighbors, :] \
                    - self.positions[ind, :]
                with np.errstate(invalid='ignore'):
                    respect_angles = \
                        np.where(angle_between(self.velocities[ind, :], diff)
                                 <= BOID_VIEW_ANGLE[n_specie] / 2)[0]
                # respect_angles: indices of neighbors
                # relatively to true_neighbors
                final_true_neighbors = true_neighbors[respect_angles]
                if final_true_neighbors.shape[0] > 0:
                    cohesion = np.mean(self.positions[final_true_neighbors, :],
                                       axis=0) - self.positions[ind, :]
                    norm_cohesion = np.linalg.norm(cohesion)
                    if norm_cohesion > EPSILON:
                        self.steering[ind, :] += CHASING_FORCE[n_specie] * \
                            (cohesion / norm_cohesion)

    def apply_fleeing(self, indices_flee, indices_fleed, n_specie):
        """
        apply fleeing rule: boids :indices_flee: of species :n_specie: steer
        away from the visible boids :indices_fleed:.

        :param indices_flee: indices of boids doing the fleeing
        :param indices_fleed: indices of boids being fled from
        :n_specie: the fleeing species, an integer between 0 and num_species (used for parameters)
        """
        if len(indices_flee) == 0 or len(indices_fleed) == 0:
            return
        # we take each that are fleeing
        for ind in indices_flee:
            indices_ = list(indices_fleed)
            indices_ = np.array(indices_)
            # get the distances
            dist_norm = np.linalg.norm(self.positions[indices_, :]
                                       - self.positions[ind, :], axis=1)
            dist = self.positions[indices_, :] \
                - self.positions[ind, :]
            neighbors = np.where(dist_norm <= BOID_VIEW[n_specie])[0]
            if neighbors.shape[-1] != 0:
                respect_angles = \
                    np.where(angle_between(self.velocities[ind, :], dist[neighbors])
                             <= BOID_VIEW_ANGLE[n_specie] / 2)[0]
                # neighbors that respect angles respectively to indices_
                neighbors_that_respect_angle = neighbors[respect_angles]
                if neighbors_that_respect_angle.shape[0] > 0:
                    flee = np.mean(dist[neighbors_that_respect_angle, :], axis=0)
                    norm_flee = np.linalg.norm(flee)
                    # BUGFIX: guard against a (near-)zero norm before dividing,
                    # matching the EPSILON checks used by cohesion/alignment —
                    # the original divided unconditionally (possible 0/0 -> NaN).
                    if norm_flee > EPSILON:
                        self.steering[ind, :] -= FLEE_FORCE[n_specie] * flee / norm_flee
class Boids_(Physics2D):
    """
    Boids class, inherited from Physics2D, define the behavior of boids.

    Separation, cohesion and alignment are applied per species; a uniform
    spatial grid with square cells of side LEAF_SIZE restricts each boid's
    neighbour search to its own cell plus the adjacent ones.

    :param num_entities: number of boids for each species, (ex: [12,13] mean 12 boids for species 1 and
                         13 for species 2)
    :param positions: initial positions of boids, 2D array of shape (n_boids, 2)
    :param velocities: initial velocities of boids, 2D array of shape (n_boids, 2)
    :param min_speed: min speed (l2-norm) of boids
    :param max_speed: max speed (l2-norm) of boids
    :param max_turn: max angle change a boid can have on one step, pi/48 by default.
    :param dt: time step of the physics integration (1 by default)
    """

    def __init__(self, num_entities, positions, velocities,
                 min_speed, max_speed, max_turn, dt=1):
        super().__init__(positions, velocities,
                         min_speed, max_speed, max_turn, dt)
        self.list_number_boids = num_entities
        # contiguous global index range of each species
        self.list_indices_boids = list()
        self.init_list_indices_boids()
        # cell (i, j) -> list of cells to scan when looking for neighbours
        self.dict_indices = dict()
        self.build_grid()
        self.positions = positions
        # grid[i][j]: indices of the boids currently inside cell (i, j)
        # NOTE(review): the two grid axes are sized from WINDOW_SIZE[1] and
        # WINDOW_SIZE[0] in the opposite order of how apply_sca iterates
        # them; this is only consistent for a square window -- confirm.
        self.grid = \
            [[[] for _ in range(int((WINDOW_SIZE[0] / LEAF_SIZE)))]
             for _ in range(int(WINDOW_SIZE[1] / LEAF_SIZE))]
        self.steering = np.zeros(shape=(self.nb_entities, 2))

    def update_boids(self):
        """
        calculate acceleration for each boid with the rules, then advance
        the physics by one step.
        """
        self.update_grid()
        # calculate steering with rules
        # apply sca for each type of boids
        self.apply_sca()
        self.update(self.steering)

    def init_list_indices_boids(self):
        """Fill list_indices_boids with one contiguous index range per species."""
        sum_ = 0
        for n_boids in self.list_number_boids:
            self.list_indices_boids.append(np.arange(sum_, sum_ + n_boids))
            sum_ += n_boids

    def apply_sca(self):
        """
        apply separation, cohesion, alignment, chase and flee rules
        """
        self.steering = np.zeros(shape=(self.nb_entities, 2))
        timer1 = time.time()
        for i in np.arange(int(WINDOW_SIZE[0] / LEAF_SIZE)):
            for j in np.arange(int(WINDOW_SIZE[1] / LEAF_SIZE)):
                # population of cell (i, j)
                grid = self.grid[i][j]
                # population of cell (i, j) and its neighbouring cells
                neighbors_from_grid = list()
                for (i_, j_) in self.dict_indices[i, j]:
                    neighbors_from_grid += self.grid[i_][j_]
                # sets give O(1) membership tests in the species loop below
                neighbors_from_grid_set = set(neighbors_from_grid)
                grid_set = set(grid)
                # for each boid of each species in this cell, apply the rules
                for n_species, ind_species in enumerate(self.list_indices_boids):
                    # neighbors from grid belonging to this specific species
                    neighbors_from_grid_species = list()
                    ind_to_run = list()
                    for ind in ind_species:
                        if ind in neighbors_from_grid_set:
                            neighbors_from_grid_species.append(ind)
                        if ind in grid_set:
                            ind_to_run.append(ind)
                    for ind in ind_to_run:
                        if neighbors_from_grid_species:
                            self.apply_cohesion_separation_alignment(ind, neighbors_from_grid_species, n_species)
        # TODO: change chasing and fleeing methods
        # apply chasing and fleeing relations
        """
        for i_ in range(RELATIONS_SPECIES.shape[0]):
            for j_ in range(RELATIONS_SPECIES.shape[0]):
                # species i chase species j
                if RELATIONS_SPECIES[i_, j_] == 1:
                    self.apply_chasing(list_species_indices[i_], list_species_indices[j_], i_)
                # species i flee species j
                if RELATIONS_SPECIES[i_, j_] == -1:
                    self.apply_fleeing(list_species_indices[i_], list_species_indices[j_], i_)
        """
        timer2 = time.time()
        print("time for one timestep:", timer2 - timer1)

    def build_grid(self):
        """
        Precompute the neighbourhood lookup table: for every cell (i, j),
        the list of cells -- the cell itself plus its in-bounds adjacent
        cells -- whose population is scanned during neighbour searches.

        Replaces the previous hand-written case analysis, which on the
        j == size_grid border appended (i, j) and (i + 1, j - 1) twice and
        never appended (i + 1, j) and (i, j - 1).
        """
        # NOTE(review): sized from WINDOW_SIZE[0] only, so a square window
        # is assumed -- confirm.
        size_grid = int(WINDOW_SIZE[0] / LEAF_SIZE) - 1
        self.dict_indices = np.empty(shape=(size_grid + 1, size_grid + 1), dtype=list)
        print(self.dict_indices.shape)
        for i in range(size_grid + 1):
            for j in range(size_grid + 1):
                # the cell itself (di == dj == 0) plus every in-bounds
                # neighbour in the 8-neighbourhood
                self.dict_indices[i, j] = \
                    [(i + di, j + dj)
                     for di in (-1, 0, 1)
                     for dj in (-1, 0, 1)
                     if 0 <= i + di <= size_grid and 0 <= j + dj <= size_grid]

    def update_grid(self):
        """
        Rebuild the grid from the current positions: each boid index is
        appended to the cell containing its position.
        """
        self.grid = [[[] for _ in range(int((WINDOW_SIZE[0] / LEAF_SIZE)))]
                     for _ in range(int(WINDOW_SIZE[1] / LEAF_SIZE))]
        # integer cell coordinates of every boid
        cells_i = np.array(np.floor(self.positions[:, 0] / LEAF_SIZE), dtype=int)
        cells_j = np.array(np.floor(self.positions[:, 1] / LEAF_SIZE), dtype=int)
        for k in range(self.nb_entities):
            self.grid[cells_i[k]][cells_j[k]].append(k)

    def apply_cohesion_separation_alignment(self, ind, indices, n_specie):
        """
        apply the cohesion, alignment and separation rules to boid :ind:
        against the candidate neighbours :indices:

        :param ind: global index of the boid being steered
        :param indices: candidate neighbour indices (same species, drawn
                        from the surrounding grid cells); may contain
                        :ind: itself
        :param n_specie: the species, an integer between 0 and num_species
                         (used for parameters)
        """
        if len(indices) <= 1:
            return
        indices_ = list(indices)
        indices_.remove(ind)
        indices_ = np.array(indices_)
        diff_ind = np.array(self.positions[indices_, :] - self.positions[ind, :])
        dist = np.linalg.norm(diff_ind, axis=1)
        # neighbors indices relatively to indices_
        neighbors = np.where(dist < BOID_VIEW[n_specie])[0]
        if neighbors.shape[0] > 0:
            # true_neighbors: true indices for positions and velocities
            true_neighbors = indices_[neighbors]
            diff = self.positions[true_neighbors, :] \
                - self.positions[ind, :]
            # keep only neighbours inside the field of view
            with np.errstate(invalid='ignore'):
                respect_angles = \
                    np.where(angle_between(self.velocities[ind, :], diff)
                             <= BOID_VIEW_ANGLE[n_specie] / 2)[0]
            final_true_neighbors = true_neighbors[respect_angles]
            if final_true_neighbors.shape[0] > 0:
                # alignment: steer towards the mean heading of neighbours
                mean_vel = np.mean(self.velocities[final_true_neighbors, :], axis=0)
                norm_vel = np.linalg.norm(mean_vel)
                if norm_vel > EPSILON:
                    self.steering[ind, :] += ALIGNMENT_FORCE[n_specie] * \
                        (mean_vel / norm_vel)
                # cohesion: steer towards the neighbours' mean position
                cohesion = np.mean(self.positions[final_true_neighbors, :],
                                   axis=0) - self.positions[ind, :]
                norm_cohesion = np.linalg.norm(cohesion)
                if norm_cohesion > EPSILON:
                    self.steering[ind, :] += COHESION_FORCE[n_specie] * \
                        (cohesion / norm_cohesion)
                # separation: push away from neighbours that are too close
                dist_norm = np.linalg.norm(self.positions[final_true_neighbors, :]
                                           - self.positions[ind, :], axis=1)
                dist = self.positions[final_true_neighbors, :] \
                    - self.positions[ind, :]
                neighbors = np.where(dist_norm <= SEPARATION_DIST[n_specie])[0]
                if neighbors.shape[-1] != 0:
                    respect_angles = \
                        np.where(angle_between(self.velocities[ind, :], dist[neighbors])
                                 <= BOID_VIEW_ANGLE[n_specie] / 2)[0]
                    neighbors_that_respect_angle = neighbors[respect_angles]
                    if neighbors_that_respect_angle.shape[0] > 0:
                        vector_separation = dist[neighbors_that_respect_angle, :]
                        # NOTE(review): dividing by np.abs() reduces each
                        # component to +/-1 (and yields nan when a component
                        # is exactly 0); presumably a unit vector was
                        # intended -- confirm before changing
                        norm_vector_separation = np.abs(vector_separation)
                        vector_separation = np.divide(vector_separation, norm_vector_separation)
                        sep = np.sum(vector_separation, axis=0)
                        self.steering[ind, :] -= SEPARATION_FORCE[n_specie] * sep
                        # sep/dist_norm?

    def apply_chasing(self, indices_chase, indices_chased, n_specie):
        """
        apply the chase rule: each boid in :indices_chase: steers towards
        the mean position of the visible boids in :indices_chased:

        :param indices_chase: indices of the chasing boids (species :n_specie:)
        :param indices_chased: indices of the chased boids
        :param n_specie: the species, an integer between 0 and num_species (used for parameters)
        """
        if len(indices_chase) == 0 or len(indices_chased) == 0:
            return
        # loop-invariant: the array of chased indices
        indices_ = np.array(list(indices_chased))
        # we take each that are chasing
        for ind in indices_chase:
            dist = np.linalg.norm(self.positions[indices_, :]
                                  - self.positions[ind, :], axis=1)
            # neighbors indices relatively to indices_
            neighbors = np.where(dist < BOID_VIEW[n_specie])[0]
            if neighbors.shape[0] > 0:
                # true_neighbors: true indices for positions and velocities
                true_neighbors = indices_[neighbors]
                diff = self.positions[true_neighbors, :] \
                    - self.positions[ind, :]
                # keep only targets inside the field of view
                with np.errstate(invalid='ignore'):
                    respect_angles = \
                        np.where(angle_between(self.velocities[ind, :], diff)
                                 <= BOID_VIEW_ANGLE[n_specie] / 2)[0]
                final_true_neighbors = true_neighbors[respect_angles]
                if final_true_neighbors.shape[0] > 0:
                    cohesion = np.mean(self.positions[final_true_neighbors, :],
                                       axis=0) - self.positions[ind, :]
                    norm_cohesion = np.linalg.norm(cohesion)
                    if norm_cohesion > EPSILON:
                        self.steering[ind, :] += CHASING_FORCE[n_specie] * \
                            (cohesion / norm_cohesion)

    def apply_fleeing(self, indices_flee, indices_fleed, n_specie):
        """
        apply the flee rule: each boid in :indices_flee: steers away from
        the mean offset towards the visible boids in :indices_fleed:

        :param indices_flee: indices of the fleeing boids (species :n_specie:)
        :param indices_fleed: indices of the boids being fled from
        :param n_specie: the species, an integer between 0 and num_species (used for parameters)
        """
        if len(indices_flee) == 0 or len(indices_fleed) == 0:
            return
        # loop-invariant: the array of threat indices
        indices_ = np.array(list(indices_fleed))
        # we take each that are fleeing
        for ind in indices_flee:
            # offset vectors and distances to every threat
            diff = self.positions[indices_, :] - self.positions[ind, :]
            dist_norm = np.linalg.norm(diff, axis=1)
            neighbors = np.where(dist_norm <= BOID_VIEW[n_specie])[0]
            if neighbors.shape[-1] != 0:
                # keep only threats inside the field of view
                respect_angles = \
                    np.where(angle_between(self.velocities[ind, :], diff[neighbors])
                             <= BOID_VIEW_ANGLE[n_specie] / 2)[0]
                neighbors_that_respect_angle = neighbors[respect_angles]
                if neighbors_that_respect_angle.shape[0] > 0:
                    flee = np.mean(diff[neighbors_that_respect_angle, :], axis=0)
                    norm_flee = np.linalg.norm(flee)
                    # guard against a zero mean vector: the previous code
                    # divided unconditionally, injecting nan/inf steering
                    if norm_flee > EPSILON:
                        self.steering[ind, :] -= \
                            FLEE_FORCE[n_specie] * flee / norm_flee
| 40.440171
| 116
| 0.535877
| 3,307
| 28,389
| 4.397339
| 0.056849
| 0.009352
| 0.047449
| 0.047311
| 0.863843
| 0.848508
| 0.833379
| 0.817769
| 0.804635
| 0.785449
| 0
| 0.014424
| 0.365036
| 28,389
| 701
| 117
| 40.49786
| 0.7923
| 0.170277
| 0
| 0.760417
| 0
| 0
| 0.003034
| 0
| 0
| 0
| 0
| 0.001427
| 0
| 1
| 0.046875
| false
| 0
| 0.013021
| 0
| 0.085938
| 0.007813
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
64d808ea397443a9ffb06cb13393bf3a99713fbe
| 4,902
|
py
|
Python
|
tests/test_unit/test_rules.py
|
wsprague-nu/MINE-Database
|
4f9a4bf9d163e96da25a4dc8e5dffc9530fdc427
|
[
"MIT"
] | 4
|
2020-07-13T17:04:42.000Z
|
2021-08-04T15:53:10.000Z
|
tests/test_unit/test_rules.py
|
wsprague-nu/MINE-Database
|
4f9a4bf9d163e96da25a4dc8e5dffc9530fdc427
|
[
"MIT"
] | 5
|
2020-01-29T21:31:24.000Z
|
2021-11-16T16:17:23.000Z
|
tests/test_unit/test_rules.py
|
wsprague-nu/MINE-Database
|
4f9a4bf9d163e96da25a4dc8e5dffc9530fdc427
|
[
"MIT"
] | 6
|
2019-10-23T05:10:12.000Z
|
2021-11-12T20:26:12.000Z
|
"""Tests for rules.py using pytest."""
import json
from pathlib import Path
from minedatabase import pickaxe
from minedatabase.rules import BNICE, metacyc_generalized, metacyc_intermediate
file_path = Path(__file__)
file_dir = file_path.parent
# load the expected SMARTS strings (keyed by rule name) used by the asserts below
with open((file_dir / "../data/test_rules/rules_to_assert.json"), "r") as f:
    rule_assert_dict = json.load(f)
def test_metacyc_generalized_full():
    """Default metacyc_generalized(): name, 1216 operators, 45 coreactants, rule0001 SMARTS."""
    rule_list, correactant_list, rule_name = metacyc_generalized()
    pk = pickaxe.Pickaxe(rule_list=rule_list, coreactant_list=correactant_list)
    assert rule_name == "Metacyc_generalized"
    assert len(pk.operators) == 1216
    assert len(pk.coreactants) == 45
    assert (
        pk.operators["rule0001"][1]["SMARTS"]
        == rule_assert_dict["Metacyc_generalized_rule0001"]
    )
def test_metacyc_generalized_specify_number():
    """metacyc_generalized(n_rules=10): rule count appears in the name and exactly 10 operators load."""
    rule_list, correactant_list, rule_name = metacyc_generalized(n_rules=10)
    pk = pickaxe.Pickaxe(rule_list=rule_list, coreactant_list=correactant_list)
    assert rule_name == "Metacyc_generalized_10_rules"
    assert len(pk.operators) == 10
    assert len(pk.coreactants) == 45
    assert (
        pk.operators["rule0001"][1]["SMARTS"]
        == rule_assert_dict["Metacyc_generalized_rule0001"]
    )
def test_metacyc_generalized_specify_fraction():
    """metacyc_generalized(fraction_coverage=0.9): fraction appears in the name and 413 operators load."""
    rule_list, correactant_list, rule_name = metacyc_generalized(fraction_coverage=0.9)
    pk = pickaxe.Pickaxe(rule_list=rule_list, coreactant_list=correactant_list)
    assert rule_name == "Metacyc_generalized_0,9_fraction_coverage"
    assert len(pk.operators) == 413
    assert len(pk.coreactants) == 45
    assert (
        pk.operators["rule0001"][1]["SMARTS"]
        == rule_assert_dict["Metacyc_generalized_rule0001"]
    )
def test_metacyc_exclude():
    """metacyc_generalized with exclude_containing: exclusion suffix in name, 839 operators remain."""
    rule_list, correactant_list, rule_name = metacyc_generalized(
        fraction_coverage=0.9, exclude_containing=["aromatic", "halogen"]
    )
    pk = pickaxe.Pickaxe(rule_list=rule_list, coreactant_list=correactant_list)
    assert rule_name == "Metacyc_generalized_0,9_fraction_coverage_with_exclusion"
    assert len(pk.operators) == 839
    assert len(pk.coreactants) == 45
def test_metacyc_include():
    """metacyc_generalized with include_containing: inclusion suffix in name, 377 operators remain."""
    rule_list, correactant_list, rule_name = metacyc_generalized(
        fraction_coverage=0.9, include_containing=["aromatic", "halogen"]
    )
    pk = pickaxe.Pickaxe(rule_list=rule_list, coreactant_list=correactant_list)
    assert rule_name == "Metacyc_generalized_0,9_fraction_coverage_with_inclusion"
    assert len(pk.operators) == 377
    assert len(pk.coreactants) == 45
def test_metacyc_intermediate():
    """Default metacyc_intermediate(): name, 7154 operators, 45 coreactants, rule0001_0167 SMARTS."""
    rule_list, correactant_list, rule_name = metacyc_intermediate()
    pk = pickaxe.Pickaxe(rule_list=rule_list, coreactant_list=correactant_list)
    assert rule_name == "Metacyc_intermediate"
    assert len(pk.operators) == 7154
    assert len(pk.coreactants) == 45
    assert (
        pk.operators["rule0001_0167"][1]["SMARTS"]
        == rule_assert_dict["Metacyc_intermediate_rule0001_0167"]
    )
def test_metacyc_intermediate_specify_number():
    """metacyc_intermediate(n_rules=20): rule count appears in the name and exactly 20 operators load."""
    rule_list, correactant_list, rule_name = metacyc_intermediate(n_rules=20)
    pk = pickaxe.Pickaxe(rule_list=rule_list, coreactant_list=correactant_list)
    assert rule_name == "Metacyc_intermediate_20_rules"
    assert len(pk.operators) == 20
    assert len(pk.coreactants) == 45
def test_metacyc_intermediate_specify_fraction():
    """metacyc_intermediate(fraction_coverage=0.2): fraction appears in the name and 75 operators load."""
    rule_list, correactant_list, rule_name = metacyc_intermediate(fraction_coverage=0.2)
    pk = pickaxe.Pickaxe(rule_list=rule_list, coreactant_list=correactant_list)
    assert rule_name == "Metacyc_intermediate_0,2_fraction_coverage"
    assert len(pk.operators) == 75
    assert len(pk.coreactants) == 45
def test_metacyc_intermediate_exclude():
    """metacyc_intermediate with exclude_containing: exclusion suffix in name, 5775 operators remain."""
    rule_list, correactant_list, rule_name = metacyc_intermediate(
        fraction_coverage=0.9, exclude_containing=["aromatic", "halogen"]
    )
    pk = pickaxe.Pickaxe(rule_list=rule_list, coreactant_list=correactant_list)
    assert rule_name == "Metacyc_intermediate_0,9_fraction_coverage_with_exclusion"
    assert len(pk.operators) == 5775
    assert len(pk.coreactants) == 45
def test_metacyc_intermediate_include():
    """metacyc_intermediate with include_containing: inclusion suffix in name, 67 operators remain."""
    rule_list, correactant_list, rule_name = metacyc_intermediate(
        fraction_coverage=0.9, include_containing=["halogen"]
    )
    pk = pickaxe.Pickaxe(rule_list=rule_list, coreactant_list=correactant_list)
    assert rule_name == "Metacyc_intermediate_0,9_fraction_coverage_with_inclusion"
    assert len(pk.operators) == 67
    assert len(pk.coreactants) == 45
def test_BNICE():
    """BNICE(): name, 250 operators, 33 coreactants, and the 1.1.1_01 SMARTS string."""
    rule_list, correactant_list, rule_name = BNICE()
    pk = pickaxe.Pickaxe(rule_list=rule_list, coreactant_list=correactant_list)
    assert rule_name == "BNICE"
    assert len(pk.operators) == 250
    assert len(pk.coreactants) == 33
    assert pk.operators["1.1.1_01"][1]["SMARTS"] == rule_assert_dict["BNICE_1.1.1_01"]
| 33.806897
| 88
| 0.743982
| 628
| 4,902
| 5.457006
| 0.119427
| 0.077035
| 0.121973
| 0.073826
| 0.852349
| 0.824044
| 0.783484
| 0.774438
| 0.71958
| 0.612489
| 0
| 0.033951
| 0.152795
| 4,902
| 144
| 89
| 34.041667
| 0.791235
| 0.006528
| 0
| 0.373737
| 0
| 0
| 0.145765
| 0.107525
| 0
| 0
| 0
| 0
| 0.444444
| 1
| 0.111111
| false
| 0
| 0.040404
| 0
| 0.151515
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b3b9e5683ca77214bfa0d9523ff647bb800a6671
| 134
|
py
|
Python
|
ai_workshops/None.py
|
AIDA-UIUC/ai_workshops
|
2b0750c422df73c801b76fc9910f872235a198e8
|
[
"Apache-2.0"
] | 2
|
2020-11-07T18:58:42.000Z
|
2020-11-08T18:41:07.000Z
|
ai_workshops/None.py
|
AIDA-UIUC/ai_workshops
|
2b0750c422df73c801b76fc9910f872235a198e8
|
[
"Apache-2.0"
] | 3
|
2020-11-13T13:40:10.000Z
|
2022-02-26T10:08:10.000Z
|
ai_workshops/None.py
|
AIDA-UIUC/ai_workshops
|
2b0750c422df73c801b76fc9910f872235a198e8
|
[
"Apache-2.0"
] | 1
|
2021-01-24T05:59:41.000Z
|
2021-01-24T05:59:41.000Z
|
# Cell
import numpy as np
# Cell
from fastai.vision.all import *
# Cell
import numpy as np
# Cell
from fastai.vision.all import *
| 10.307692
| 31
| 0.716418
| 22
| 134
| 4.363636
| 0.409091
| 0.208333
| 0.3125
| 0.354167
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0.208955
| 134
| 13
| 32
| 10.307692
| 0.90566
| 0.141791
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 14
|
b3cc148eeee32277517b22629618195846bcdf39
| 13,604
|
py
|
Python
|
Hi.py
|
ayoubbbu/ayoub
|
1c4534688f5bfa03bf670fb2aebb60662c17c23a
|
[
"Apache-2.0"
] | 1
|
2021-08-02T19:23:52.000Z
|
2021-08-02T19:23:52.000Z
|
Hi.py
|
ayoubbbu/ayoub
|
1c4534688f5bfa03bf670fb2aebb60662c17c23a
|
[
"Apache-2.0"
] | null | null | null |
Hi.py
|
ayoubbbu/ayoub
|
1c4534688f5bfa03bf670fb2aebb60662c17c23a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*- import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib from multiprocessing.pool import ThreadPool #udah nyampe di sini ea ubah author ataupun ngerecode semoga emak bapaknya mati dalam keadaan mengenaskan #buat yg nyampe di sini cuman buat mempelajari pemrograman dan beberapa fungsinya ane ucapin selamat berjuang #tapi awaslu yg nge recode ataupun mengganti author try: import mechanize except ImportError: os.system('pip2 install mechanize') else: try: import requests except ImportError: os.system('pip2 install requests') def entertools(): os.system('sh babi_buat_yang_ngerecode_thanks.sh') def restart(): ngulang = sys.executable os.execl(ngulang, ngulang, *sys.argv) def wa(): os.system('xdg-open https://api.whatsapp.com/send?phone=62895704349609&text=Assalamualaikum') def menutools(): os.system ('clear') print ' ┏━╮╭━┓ \033[1;37m[\033[1;33m+\033[1;37m] TOOLS HACK ALL IN ONE\n \033[1;31m┃┏┗┛┓┃ \033[1;35m* \033[1;37mAuthor \033[1;31m: \033[1;36mMochammad Nopal Attasya\n \033[1;31m╰┓▋▋┏╯ \033[1;37mEmail \033[1;31m: \033[1;32mnopal7568@gmail.com\n\033[1;31m╭━┻╮╲┗━━━━╮╭╮ \033[1;37mYoutube\033[1;31m: \033[1;32mPAJAOQ\n\033[1;31m┃▎▎┃╲╲╲╲╲╲┣━╯ \033[1;35m* \033[1;33mTools Berisi 20 Tools Hacking.\n\033[1;31m╰━┳┻▅╯╲╲╲╲┃ \033[1;33mJauhi Larangan Yang Ada \033[1;37m^_^\n \033[1;31m ╰━┳┓┏┳┓┏╯ \033[1;35m+ \033[1;33mHargai Author Karena Memakai\n \033[1;31m┗┻┛┗┻┛ \033[1;33m Tidak Sesulit Membuat \033[1;37m:-D' loding2() print '\033[1;35m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;35m║ \033[1;37m NO\033[1;37m \033[1;35m║║ \033[1;37m{\033[1;32mDAFTAR TOOLS HACK LENGKAP\033[1;37m} \033[1;35m║║\033[0mSTATUS\033[1;35m║\n\033[1;35m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m01\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mDARK FACEBOOK\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mON\033[1;37m} 
\033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m02\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mMULTI BRUTEFORCE FACEBOOK\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mON\033[1;37m} \033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m03\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mHACK INSTAGRAM \033[1;37m(\033[1;36mNO ROOT\033[1;37m)} \033[1;31m║║ \033[1;37m{\033[1;32mON\033[1;37m}\033[1;31m ║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m04\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mHACK MAIL MULTIBRUTEFORCE\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mON\033[1;37m} \033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m05\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mVIRTEX WHATSAPP\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mON\033[1;37m} \033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m06\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mDAFTAR WEBSITE VULN\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mON\033[1;37m} \033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m07\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mSPAM SMS\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mON\033[1;37m}\033[1;31m ║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m08\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mCHAT 
ADMIN\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mON\033[1;37m}\033[1;31m ║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m09\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mJOIN C3L3N6 CYBER\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mON\033[1;37m}\033[1;31m ║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m10\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mHACK WIFI \033[1;37m(\033[1;36mROOT\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mON\033[1;37m} \033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m11\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mHACK INSTAGRAM \033[1;37m(\033[1;36mROOT\033[1;37m)\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mON\033[1;37m} \033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m12\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mHACK PULSA\033[1;37m} \033[1;31m║║\033[1;37m{\033[1;31mCOID\033[1;37m}\033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m13\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mHACK DIAMOND FREE FIRE\033[1;37m} \033[1;31m║║\033[1;37m{\033[1;33mMAIN\033[1;37m}\033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m14\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mHACK UC PUBG\033[1;37m} \033[1;31m║║\033[1;37m{\033[1;33mMAIN\033[1;37m}\033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ 
\033[1;37m{\033[1;32m15\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mHACK CP CODM\033[1;37m} \033[1;31m║║\033[1;37m{\033[1;33mMAIN\033[1;37m}\033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m16\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mBUG HUNTERS\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mON\033[1;37m} \033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m17\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mK-DORK\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mON\033[1;37m} \033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m18\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mSEND VIRUS\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mON\033[1;37m} \033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m19\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mSHORTNER LINKS\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mON\033[1;37m} \033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m20\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mNUYUL APLIKASI CAPING\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mON\033[1;37m} \033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m21\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mNUYUL APLIKASI FLASHGO\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mON\033[1;37m} \033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ 
\033[1;37m{\033[1;32m22\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mHACK DIAMOND MLBB\033[1;37m} \033[1;31m║║\033[1;37m{\033[1;31mCOID\033[1;37m}\033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m22\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mHACK FACEBOOK TARGET\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;32mON\033[1;37m} \033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m00\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;34mKELUAR PROGRAM\033[1;37m} \033[1;31m║║ \033[1;31mEXIT \033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝\n\033[1;31m╔══════╗╔═════════════════════════════╗╔══════╗\n\033[1;31m║ \033[1;37m{\033[1;32m++\033[1;37m} \033[1;31m║║ \033[1;37m{\033[1;34mSUBCRIBE PAJAOQ\033[1;37m} \033[1;31m║║ \033[1;37mSUBS \033[1;31m║\n\033[1;31m╚══════╝╚═════════════════════════════╝╚══════╝' def ressture(): os.system('clear') print '\x1b[1;33m╔╦══════════════════════════════════╗\n║║ Sudah punya ID dan Password nya? ║\n╚╣╔═════════════════════════════════╝\n╔╝╚═════════════════════╗' print '\x1b[1;33m║LOGIN UNTUK MELANJUTKAN║\n╠═══════════════════════╝' user = raw_input('║ID : ') import getpass sandi = raw_input('║PW : ') if sandi == 'channel pajaoq' and user == 'subscribe': print '║LOGIN SUKSES\n╚═══════\x1b[1;91m▶' sys.exit else: print 'Login GAGAL, Silahkan hubungi ADMIN' wa() ressture() def tik(): titik = [ ' ', '. ', '.. ', '...', '.. ', '. 
', ' '] for o in titik: print '\r\x1b[1;91m [\xe2\x97\x8f] \x1b[1;92mLoading \x1b[1;97m' + o, sys.stdout.flush() time.sleep(0.7) def loding2(): looding2 = [ '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[/]', '[-]', '[\]', '[|]', '[\033[1;32m✓\033[0m]\n'] for o in looding2: print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mCheck \x1b[1;97m' + o, sys.stdout.flush() time.sleep(0.1) def lodhirt(): lodhirt = [ 'PAJAOQ', ' ', 'PAJAOQ', ' ', 'PAJAOQ', ' ', 'PAJAOQ', ' ', 'PAJAOQ', ' ', 'PAJAOQ', ' ', 'PAJAOQ', ' ', 'PAJAOQ', ' ', 'PAJAOQ', ' ', 'PAJAOQ', 'PAJAOQ', ' ', 'PAJAOQ', ' ', 'PAJAOQ', ' ', 'PAJAOQ', ' ', 'PAJAOQ', ' ', 'PAJAOQ', ' ', 'PAJAOQ', ' ', 'PAJAOQ', ' ', 'PAJAOQ', ' ', 'PAJAOQ\n'] for o in lodhirt: print '\r\x1b[1;97m╔[\x1b[1;32m+\x1b[1;97m] \x1b[1;92mSUBSCRIBE CHANNEL \x1b[1;96m' + o, sys.stdout.flush() time.sleep(0.1) os.system('clear') logoname = '\033[1;32m ____________\n\033[1;32m ║▒▒▒▒▒▒▒▒▒▒║\n\033[1;32m ║▒▒▒▒▒▒▒▒▒▒║\n\033[1;32m ║▒▒▒▒▒▒▒▒▒▒║\033[1;33m ╔╗╔╔═╗╔╦╗╔═╗\n\033[1;32m ╔════════════╗\033[1;33m ║║║╠═╣║║║╠═╣\n\033[1;32m ╚════════════╝\033[1;33m ╝╚╝╩ ╩╩ ╩╩ ╩\n\033[1;31m ║\033[1;36m██████████\033[1;31m╚╗\033[1;33m ╦╔═╔═╗╔╦╗╦ ╦\n\033[1;31m 
║\033[1;36m██\033[1;31m╔══╗\033[1;36m█\033[1;31m╔═╗\033[1;36m█\033[1;31m║\033[1;33m ╠╩╗╠═╣║║║║ ║\n\033[1;31m ║\033[1;36m██\033[1;31m║\033[1;33m╬\033[1;31m╔╝\033[1;36m█\033[1;31m╚╗║\033[1;36m█\033[1;31m║\033[1;33m ╩ ╩╩ ╩╩ ╩╚═╝\n\033[1;31m ║\033[1;36m██\033[1;31m╚═╝\033[1;36m█\033[1;31m║\033[1;36m█\033[1;31m╚╝\033[1;36m█\033[1;31m║\033[0m Subscribe\n\033[1;31m ╚╗\033[1;36m█████████\033[1;31m═╝ \033[0mChannel\n\033[1;31m ╚╗║╠╩╩╩╩╩╝ \033[0mPAJAOQ\n\033[1;31m ║║╚╗\033[1;33m┈\033[1;34m█▐█████\033[1;31m▒\033[0m.。oO\n\033[1;31m ║\033[1;36m██\033[1;31m╠╦╦╦╗\n\033[1;31m ╚╗\033[1;36m██████ \033[0mAuthor \033[1;31m: \033[1;32mNovalAttasya\n\033[1;31m ╚════╝ \033[0mTeam \033[1;31m: \033[1;32mC3L3N6 CYBER BORNEO\n \033[1;33m<══════════════════════════════════>\n\033[1;31m' print logoname enternamek = raw_input("\033[1;31m[*] \033[1;32mMASUKAN NAMA KAMU: \033[1;36m") os.system('clear') print 32 * '\x1b[1;97m\xe2\x95\x90' print '\033[1;33m █░░░█ █▀▀ █░░ ▄▀ ▄▀▄ █▄░▄█ █▀▀\n █░█░█ █▀▀ █░▄ █░ █░█ █░█░█ █▀▀\n ░▀░▀░ ▀▀▀ ▀▀▀ ░▀ ░▀░ ▀░░░▀ ▀▀▀' print ' \033[1;31m[*] \033[1;37mHi \033[1;36m' + enternamek print 32 * '\x1b[1;97m\xe2\x95\x90' lodhirt() print '\033[1;37m║' print '\033[1;37m╠\033[1;37m[\033[1;31m*\033[1;37m] \033[1;32mPILIH MENUNYA \033[1;37m[\033[1;31m*\033[1;37m]' print '║\033[1;37m{\033[1;33m1\033[1;37m} \033[1;34mLogin Toolnya\033[1;37m' print '║\033[1;37m{\033[1;33m2\033[1;37m} \033[1;34mHubungi Author \033[0m(\033[1;32mWhatsApp\033[1;37m)' print '║\033[1;37m{\033[1;33m3\033[1;37m} \033[1;34mInstall Bahan\033[1;37m' print '║\033[1;37m{\033[1;33m4\033[1;37m} \033[1;34mDownload User & Pass\033[1;37m' print '║\033[1;37m{\033[1;31m0\033[1;37m} \033[1;31mExit.' 
pilih = input("\033[1;37m╚═\x1b[1;91m▶\x1b[1;97m ") if pilih == 1: tik() entertools() elif pilih == 2: tik() wa() print '\n\033[1;37mTerimakasih Telah Menggunakan Tools Ini ^_^' elif pilih == 3: tik() os.system ('bash babi_lu.sh') elif pilih == 4: os.system('xdg-open https://adsafelink.com/9RRWZ') elif pilih == 0: os.system('clear') print '\033[1;37mTerimakasih Telah Menggunakan Tools Ini ^_^' os.system('exit')
| 6,802
| 13,603
| 0.475007
| 2,435
| 13,604
| 3.94538
| 0.15154
| 0.217342
| 0.139898
| 0.173832
| 0.686583
| 0.663579
| 0.654731
| 0.63818
| 0.621422
| 0.60508
| 0
| 0.26287
| 0.060423
| 13,604
| 1
| 13,604
| 13,604
| 0.240886
| 0.999412
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
80b7bf86edb14b13eb70c71f15593037a7821256
| 155
|
py
|
Python
|
app/app/attribute_authority/__init__.py
|
MartinHeinz/IoT-Cloud
|
2e6fddcfe2624862c9351759334a6655a896e8c7
|
[
"MIT"
] | 14
|
2019-11-17T23:49:20.000Z
|
2022-02-04T23:28:45.000Z
|
app/app/attribute_authority/__init__.py
|
MartinHeinz/IoT-Cloud
|
2e6fddcfe2624862c9351759334a6655a896e8c7
|
[
"MIT"
] | 3
|
2019-12-02T18:26:11.000Z
|
2021-04-30T20:46:06.000Z
|
app/app/attribute_authority/__init__.py
|
MartinHeinz/IoT-Cloud
|
2e6fddcfe2624862c9351759334a6655a896e8c7
|
[
"MIT"
] | 4
|
2018-12-28T13:41:44.000Z
|
2020-09-13T14:14:06.000Z
|
from flask import Blueprint
attr_authority = Blueprint('attr_authority', __name__)
from . import endpoints # noqa pylint: disable=wrong-import-position
| 25.833333
| 69
| 0.8
| 19
| 155
| 6.210526
| 0.684211
| 0.220339
| 0.372881
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122581
| 155
| 5
| 70
| 31
| 0.867647
| 0.270968
| 0
| 0
| 0
| 0
| 0.126126
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
038072e6d165501c3f41952980e6aa1a190869b6
| 183
|
py
|
Python
|
commands.py
|
zhuitrec/django-arcutils
|
4079ef641f43baab4cda4681b1f76e320f12eb38
|
[
"MIT"
] | null | null | null |
commands.py
|
zhuitrec/django-arcutils
|
4079ef641f43baab4cda4681b1f76e320f12eb38
|
[
"MIT"
] | null | null | null |
commands.py
|
zhuitrec/django-arcutils
|
4079ef641f43baab4cda4681b1f76e320f12eb38
|
[
"MIT"
] | null | null | null |
from runcommands.commands import show_config
from arctasks.base import lint # noqa
from arctasks.python import show_upgraded_packages # noqa
from arctasks.release import * # noqa
| 30.5
| 58
| 0.814208
| 25
| 183
| 5.84
| 0.56
| 0.246575
| 0.219178
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142077
| 183
| 5
| 59
| 36.6
| 0.929936
| 0.076503
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
038141cb5f435176678b67ddd5dd5e5195a03175
| 2,681
|
py
|
Python
|
Moves/Fever.py
|
johan--/PoMoCo_RobCook
|
0ca95286b0f21803ed3a33cbad6d11fce4d7172a
|
[
"MIT",
"Unlicense"
] | 10
|
2015-07-14T05:23:56.000Z
|
2021-08-07T16:46:42.000Z
|
Moves/Fever.py
|
rpcook/PoMoCo
|
08f5170006bafabc2d70d5a681b62f7448afdbd2
|
[
"Unlicense",
"MIT"
] | 6
|
2015-05-02T23:10:38.000Z
|
2015-05-02T23:15:39.000Z
|
Moves/Fever.py
|
rpcook/PoMoCo
|
08f5170006bafabc2d70d5a681b62f7448afdbd2
|
[
"Unlicense",
"MIT"
] | 10
|
2015-05-19T06:27:49.000Z
|
2021-08-08T04:13:04.000Z
|
import time
# Move: Night Fever Dance
# Get the front legs into space @ front
hexy.LF.replantFoot(-40,stepTime=0.3)
time.sleep(0.4)
hexy.RF.replantFoot(40,stepTime=0.3)
time.sleep(0.4)
# move the mid-legs forwards to support body
hexy.LM.replantFoot(-40,stepTime=0.3)
time.sleep(0.4)
hexy.RM.replantFoot(40,stepTime=0.3)
time.sleep(0.4)
# lean back a bit
hexy.RB.setFootY(floor-20)
hexy.LB.setFootY(floor-20)
hexy.LF.setFootY(floor+60)
time.sleep(0.5)
# wave right arm about (up and right)
hexy.RF.knee(-60)
hexy.RF.ankle(0)
hexy.RF.hip(0)
hexy.neck.set(-40)
# dip body
hexy.LF.setFootY(floor+62)
hexy.LM.setFootY(floor-8)
hexy.LB.setFootY(floor-28)
hexy.RM.setFootY(floor-8)
hexy.RB.setFootY(floor-28)
# wait a bit
time.sleep(0.4)
# right arm down
hexy.RF.knee(50)
hexy.RF.ankle(-50)
hexy.RF.hip(70)
hexy.neck.set(30)
# raise body
hexy.LF.setFootY(floor+70)
hexy.LM.setFootY(floor)
hexy.LB.setFootY(floor-20)
hexy.RM.setFootY(floor)
hexy.RB.setFootY(floor-20)
# wait a bit
time.sleep(0.4)
# wave right arm about (up and right)
hexy.RF.knee(-60)
hexy.RF.ankle(0)
hexy.RF.hip(0)
hexy.neck.set(-40)
# dip body
hexy.LF.setFootY(floor+62)
hexy.LM.setFootY(floor-8)
hexy.LB.setFootY(floor-28)
hexy.RM.setFootY(floor-8)
hexy.RB.setFootY(floor-28)
# wait a bit
time.sleep(0.4)
# right arm down
hexy.RF.knee(50)
hexy.RF.ankle(-50)
hexy.RF.hip(70)
hexy.neck.set(30)
# raise body
hexy.LF.setFootY(floor+70)
hexy.LM.setFootY(floor)
hexy.LB.setFootY(floor-20)
hexy.RM.setFootY(floor)
hexy.RB.setFootY(floor-20)
# wait a bit
time.sleep(0.4)
# wave right arm about (up and right)
hexy.RF.knee(-60)
hexy.RF.ankle(0)
hexy.RF.hip(0)
hexy.neck.set(-40)
# dip body
hexy.LF.setFootY(floor+62)
hexy.LM.setFootY(floor-8)
hexy.LB.setFootY(floor-28)
hexy.RM.setFootY(floor-8)
hexy.RB.setFootY(floor-28)
# wait a bit
time.sleep(0.4)
# right arm down
hexy.RF.knee(50)
hexy.RF.ankle(-50)
hexy.RF.hip(70)
hexy.neck.set(30)
# raise body
hexy.LF.setFootY(floor+70)
hexy.LM.setFootY(floor)
hexy.LB.setFootY(floor-20)
hexy.RM.setFootY(floor)
hexy.RB.setFootY(floor-20)
# wait a bit
time.sleep(0.4)
# wave right arm about (up and right)
hexy.RF.knee(-60)
hexy.RF.ankle(0)
hexy.RF.hip(0)
hexy.neck.set(-40)
# dip body
hexy.LF.setFootY(floor+62)
hexy.LM.setFootY(floor-8)
hexy.LB.setFootY(floor-28)
hexy.RM.setFootY(floor-8)
hexy.RB.setFootY(floor-28)
# wait a bit
time.sleep(0.4)
# right arm down
hexy.RF.knee(50)
hexy.RF.ankle(-50)
hexy.RF.hip(70)
hexy.neck.set(30)
# raise body
hexy.LF.setFootY(floor+70)
hexy.LM.setFootY(floor)
hexy.LB.setFootY(floor-20)
hexy.RM.setFootY(floor)
hexy.RB.setFootY(floor-20)
# wait a bit
time.sleep(0.4)
# TODO: jiggle hips (requires some new inverse kinematics)
| 19.014184
| 58
| 0.729952
| 528
| 2,681
| 3.706439
| 0.130682
| 0.285641
| 0.066428
| 0.06745
| 0.900358
| 0.889627
| 0.876852
| 0.876852
| 0.876852
| 0.842105
| 0
| 0.065871
| 0.093995
| 2,681
| 140
| 59
| 19.15
| 0.739811
| 0.204774
| 0
| 0.924731
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007143
| 0
| 1
| 0
| true
| 0
| 0.010753
| 0
| 0.010753
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
03a249f8e02078209ce2cc0bf9b95fc6a4d1cba6
| 52,652
|
py
|
Python
|
Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/task/test_task_views.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 1
|
2021-07-24T17:22:50.000Z
|
2021-07-24T17:22:50.000Z
|
Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/task/test_task_views.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-28T03:40:31.000Z
|
2022-02-28T03:40:52.000Z
|
Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/task/test_task_views.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-25T08:34:51.000Z
|
2022-03-16T17:29:44.000Z
|
from datetime import datetime
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from dfirtrack_main.models import System, Systemstatus, Task, Taskname, Taskpriority, Taskstatus
from mock import patch
import urllib.parse
class TaskViewTestCase(TestCase):
""" task view tests """
@classmethod
def setUpTestData(cls):
# create user
test_user = User.objects.create_user(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# create object
systemstatus_1 = Systemstatus.objects.create(systemstatus_name='systemstatus_1')
# create object
System.objects.create(
system_name='system_1',
systemstatus = systemstatus_1,
system_modify_time = timezone.now(),
system_created_by_user_id = test_user,
system_modified_by_user_id = test_user,
)
# create object
taskname_1 = Taskname.objects.create(taskname_name='taskname_1')
# create object
taskpriority_1 = Taskpriority.objects.create(taskpriority_name='prio_1')
# create object
taskstatus_1 = Taskstatus.objects.create(taskstatus_name='taskstatus_1')
# create object
Task.objects.create(
taskname = taskname_1,
taskpriority = taskpriority_1,
taskstatus = taskstatus_1,
task_created_by_user_id = test_user,
task_modified_by_user_id = test_user,
)
def test_task_list_not_logged_in(self):
""" test list view """
# create url
destination = '/login/?next=' + urllib.parse.quote('/task/', safe='')
# get response
response = self.client.get('/task/', follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_task_list_logged_in(self):
""" test list view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/task/')
# compare
self.assertEqual(response.status_code, 200)
def test_task_list_template(self):
""" test list view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/task/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/task/task_list.html')
def test_task_list_get_user_context(self):
""" test list view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/task/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_task')
def test_task_list_redirect(self):
""" test list view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# create url
destination = urllib.parse.quote('/task/', safe='/')
# get response
response = self.client.get('/task', follow=True)
# compare
self.assertRedirects(response, destination, status_code=301, target_status_code=200)
def test_task_closed_not_logged_in(self):
""" test closed view """
# create url
destination = '/login/?next=' + urllib.parse.quote('/task/closed/', safe='')
# get response
response = self.client.get('/task/closed/', follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_task_closed_logged_in(self):
""" test closed view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/task/closed/')
# compare
self.assertEqual(response.status_code, 200)
def test_task_closed_template(self):
""" test closed view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/task/closed/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/task/task_closed.html')
def test_task_closed_get_user_context(self):
""" test closed view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/task/closed/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_task')
def test_task_closed_redirect(self):
""" test closed view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# create url
destination = urllib.parse.quote('/task/closed/', safe='/')
# get response
response = self.client.get('/task/closed', follow=True)
# compare
self.assertRedirects(response, destination, status_code=301, target_status_code=200)
def test_task_all_not_logged_in(self):
""" test all view """
# create url
destination = '/login/?next=' + urllib.parse.quote('/task/all/', safe='')
# get response
response = self.client.get('/task/all/', follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_task_all_logged_in(self):
""" test all view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/task/all/')
# compare
self.assertEqual(response.status_code, 200)
def test_task_all_template(self):
""" test all view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/task/all/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/task/task_all.html')
def test_task_all_get_user_context(self):
""" test all view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/task/all/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_task')
def test_task_all_redirect(self):
""" test all view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# create url
destination = urllib.parse.quote('/task/all/', safe='/')
# get response
response = self.client.get('/task/all', follow=True)
# compare
self.assertRedirects(response, destination, status_code=301, target_status_code=200)
def test_task_detail_not_logged_in(self):
""" test detail view """
# get object
taskname_1 = Taskname.objects.get(taskname_name='taskname_1')
# get object
task_1 = Task.objects.get(taskname=taskname_1)
# create url
destination = '/login/?next=' + urllib.parse.quote('/task/' + str(task_1.task_id) + '/', safe='')
# get response
response = self.client.get('/task/' + str(task_1.task_id) + '/', follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_task_detail_logged_in(self):
""" test detail view """
# get object
taskname_1 = Taskname.objects.get(taskname_name='taskname_1')
# get object
task_1 = Task.objects.get(taskname=taskname_1)
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/task/' + str(task_1.task_id) + '/')
# compare
self.assertEqual(response.status_code, 200)
def test_task_detail_template(self):
""" test detail view """
# get object
taskname_1 = Taskname.objects.get(taskname_name='taskname_1')
# get object
task_1 = Task.objects.get(taskname=taskname_1)
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/task/' + str(task_1.task_id) + '/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/task/task_detail.html')
def test_task_detail_get_user_context(self):
""" test detail view """
# get object
taskname_1 = Taskname.objects.get(taskname_name='taskname_1')
# get object
task_1 = Task.objects.get(taskname=taskname_1)
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/task/' + str(task_1.task_id) + '/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_task')
def test_task_detail_redirect(self):
""" test detail view """
# get object
taskname_1 = Taskname.objects.get(taskname_name='taskname_1')
# get object
task_1 = Task.objects.get(taskname=taskname_1)
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# create url
destination = urllib.parse.quote('/task/' + str(task_1.task_id) + '/', safe='/')
# get response
response = self.client.get('/task/' + str(task_1.task_id), follow=True)
# compare
self.assertRedirects(response, destination, status_code=301, target_status_code=200)
def test_task_add_not_logged_in(self):
""" test add view """
# create url
destination = '/login/?next=' + urllib.parse.quote('/task/add/', safe='')
# get response
response = self.client.get('/task/add/', follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_task_add_logged_in(self):
""" test add view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/task/add/')
# compare
self.assertEqual(response.status_code, 200)
def test_task_add_system_selected(self):
""" test add view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get object
system_id = System.objects.get(system_name = 'system_1').system_id
# get response
response = self.client.get('/task/add/?system=' + str(system_id))
# compare
self.assertEqual(response.status_code, 200)
def test_task_add_template(self):
""" test add view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/task/add/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/task/task_add.html')
def test_task_add_get_user_context(self):
""" test add view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/task/add/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_task')
def test_task_add_redirect(self):
""" test add view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# create url
destination = urllib.parse.quote('/task/add/', safe='/')
# get response
response = self.client.get('/task/add', follow=True)
# compare
self.assertRedirects(response, destination, status_code=301, target_status_code=200)
def test_task_add_post_redirect(self):
""" test add view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get user
test_user_id = User.objects.get(username = 'testuser_task').id
# get object
taskname_id = Taskname.objects.create(taskname_name = 'task_add_post_test').taskname_id
# get object
taskpriority_id = Taskpriority.objects.get(taskpriority_name = 'prio_1').taskpriority_id
# get object
taskstatus_id = Taskstatus.objects.get(taskstatus_name = 'taskstatus_1').taskstatus_id
# get post data
data_dict = {
'taskname': taskname_id,
'taskpriority': taskpriority_id,
'taskstatus': taskstatus_id,
'task_created_by_user_id': test_user_id,
'task_modified_by_user_id': test_user_id,
}
# get response
response = self.client.post('/task/add/', data_dict)
# get object
taskname = Taskname.objects.get(taskname_name = 'task_add_post_test')
# get object
task_id = Task.objects.get(taskname = taskname).task_id
# create url
destination = urllib.parse.quote('/task/' + str(task_id) + '/', safe='/')
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_task_add_post_system_selected_redirect(self):
""" test add view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get object
system_id = System.objects.get(system_name = 'system_1').system_id
# get user
test_user_id = User.objects.get(username = 'testuser_task').id
# get object
taskname_id = Taskname.objects.create(taskname_name = 'task_add_post_test').taskname_id
# get object
taskpriority_id = Taskpriority.objects.get(taskpriority_name = 'prio_1').taskpriority_id
# get object
taskstatus_id = Taskstatus.objects.get(taskstatus_name = 'taskstatus_1').taskstatus_id
# get post data
data_dict = {
'taskname': taskname_id,
'taskpriority': taskpriority_id,
'taskstatus': taskstatus_id,
'task_created_by_user_id': test_user_id,
'task_modified_by_user_id': test_user_id,
}
# get response
response = self.client.post('/task/add/?system=' + str(system_id), data_dict)
# create url
destination = urllib.parse.quote('/system/' + str(system_id) + '/', safe='/')
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_task_add_post_invalid_reload(self):
""" test add view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# create post data
data_dict = {}
# get response
response = self.client.post('/task/add/', data_dict)
# compare
self.assertEqual(response.status_code, 200)
def test_task_add_post_invalid_template(self):
""" test add view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# create post data
data_dict = {}
# get response
response = self.client.post('/task/add/', data_dict)
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/task/task_add.html')
def test_task_add_times_pending(self):
""" test add view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get user
test_user_id = User.objects.get(username = 'testuser_task').id
# get object
taskname_id = Taskname.objects.create(taskname_name = 'task_add_times_pending').taskname_id
# get object
taskpriority_id = Taskpriority.objects.get(taskpriority_name = 'prio_1').taskpriority_id
# get object
taskstatus_id = Taskstatus.objects.get(taskstatus_name = '10_pending').taskstatus_id
# get post data
data_dict = {
'taskname': taskname_id,
'taskpriority': taskpriority_id,
'taskstatus': taskstatus_id,
'task_created_by_user_id': test_user_id,
'task_modified_by_user_id': test_user_id,
}
# get response
self.client.post('/task/add/', data_dict)
# get object
taskname = Taskname.objects.get(taskname_name = 'task_add_times_pending')
# get object
task_add_times_pending = Task.objects.get(taskname = taskname)
# compare
self.assertEqual(task_add_times_pending.task_started_time, None)
self.assertEqual(task_add_times_pending.task_finished_time, None)
def test_task_add_times_working(self):
""" test add view """
# mock timezone.now()
dt = datetime(2020, 1, 2, tzinfo=timezone.utc)
with patch.object(timezone, 'now', return_value=dt):
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get user
test_user_id = User.objects.get(username = 'testuser_task').id
# get object
taskname_id = Taskname.objects.create(taskname_name = 'task_add_times_working').taskname_id
# get object
taskpriority_id = Taskpriority.objects.get(taskpriority_name = 'prio_1').taskpriority_id
# get object
taskstatus_id = Taskstatus.objects.get(taskstatus_name = '20_working').taskstatus_id
# get post data
data_dict = {
'taskname': taskname_id,
'taskpriority': taskpriority_id,
'taskstatus': taskstatus_id,
'task_created_by_user_id': test_user_id,
'task_modified_by_user_id': test_user_id,
}
# get response
self.client.post('/task/add/', data_dict)
# get object
taskname = Taskname.objects.get(taskname_name = 'task_add_times_working')
# get object
task_add_times_working = Task.objects.get(taskname = taskname)
# compare
self.assertEqual(task_add_times_working.task_started_time, timezone.now())
self.assertEqual(task_add_times_working.task_finished_time, None)
def test_task_add_times_done(self):
""" test add view """
# mock timezone.now()
dt = datetime(2020, 1, 2, tzinfo=timezone.utc)
with patch.object(timezone, 'now', return_value=dt):
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get user
test_user_id = User.objects.get(username = 'testuser_task').id
# get object
taskname_id = Taskname.objects.create(taskname_name = 'task_add_times_done').taskname_id
# get object
taskpriority_id = Taskpriority.objects.get(taskpriority_name = 'prio_1').taskpriority_id
# get object
taskstatus_id = Taskstatus.objects.get(taskstatus_name = '30_done').taskstatus_id
# get post data
data_dict = {
'taskname': taskname_id,
'taskpriority': taskpriority_id,
'taskstatus': taskstatus_id,
'task_created_by_user_id': test_user_id,
'task_modified_by_user_id': test_user_id,
}
# get response
self.client.post('/task/add/', data_dict)
# get object
taskname = Taskname.objects.get(taskname_name = 'task_add_times_done')
# get object
task_add_times_done = Task.objects.get(taskname = taskname)
# compare
self.assertEqual(task_add_times_done.task_started_time, timezone.now())
self.assertEqual(task_add_times_done.task_finished_time, timezone.now())
def test_task_edit_not_logged_in(self):
""" test edit view """
# get object
taskname_1 = Taskname.objects.get(taskname_name='taskname_1')
# get object
task_1 = Task.objects.get(taskname=taskname_1)
# create url
destination = '/login/?next=' + urllib.parse.quote('/task/' + str(task_1.task_id) + '/edit/', safe='')
# get response
response = self.client.get('/task/' + str(task_1.task_id) + '/edit/', follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_task_edit_logged_in(self):
""" test edit view """
# get object
taskname_1 = Taskname.objects.get(taskname_name='taskname_1')
# get object
task_1 = Task.objects.get(taskname=taskname_1)
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/task/' + str(task_1.task_id) + '/edit/')
# compare
self.assertEqual(response.status_code, 200)
def test_task_edit_template(self):
""" test edit view """
# get object
taskname_1 = Taskname.objects.get(taskname_name='taskname_1')
# get object
task_1 = Task.objects.get(taskname=taskname_1)
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/task/' + str(task_1.task_id) + '/edit/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/task/task_edit.html')
def test_task_edit_get_user_context(self):
""" test edit view """
# get object
taskname_1 = Taskname.objects.get(taskname_name='taskname_1')
# get object
task_1 = Task.objects.get(taskname=taskname_1)
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get response
response = self.client.get('/task/' + str(task_1.task_id) + '/edit/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_task')
def test_task_edit_redirect(self):
""" test edit view """
# get object
taskname_1 = Taskname.objects.get(taskname_name='taskname_1')
# get object
task_1 = Task.objects.get(taskname=taskname_1)
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# create url
destination = urllib.parse.quote('/task/' + str(task_1.task_id) + '/edit/', safe='/')
# get response
response = self.client.get('/task/' + str(task_1.task_id) + '/edit', follow=True)
# compare
self.assertRedirects(response, destination, status_code=301, target_status_code=200)
def test_task_edit_post_redirect(self):
""" test edit view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get user
test_user = User.objects.get(username = 'testuser_task')
# get object
taskname_1 = Taskname.objects.create(taskname_name = 'task_edit_post_test_1')
# get object
taskname_2 = Taskname.objects.create(taskname_name = 'task_edit_post_test_2')
# get object
taskpriority = Taskpriority.objects.get(taskpriority_name = 'prio_1')
# get object
taskstatus = Taskstatus.objects.get(taskstatus_name = 'taskstatus_1')
# create object
task_1 = Task.objects.create(
taskname = taskname_1,
taskpriority = taskpriority,
taskstatus = taskstatus,
task_created_by_user_id = test_user,
task_modified_by_user_id = test_user,
)
# create post data
data_dict = {
'taskname': taskname_2.taskname_id,
'taskpriority': taskpriority.taskpriority_id,
'taskstatus': taskstatus.taskstatus_id,
'task_modified_by_user_id': test_user.id,
}
# get response
response = self.client.post('/task/' + str(task_1.task_id) + '/edit/', data_dict)
# get object
task_2 = Task.objects.get(taskname = taskname_2)
# create url
destination = urllib.parse.quote('/task/' + str(task_2.task_id) + '/', safe='/')
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_task_edit_post_system_selected_redirect(self):
""" test edit view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get object
system_id = System.objects.get(system_name = 'system_1').system_id
# get user
test_user = User.objects.get(username = 'testuser_task')
# get object
taskname_1 = Taskname.objects.create(taskname_name = 'task_edit_post_test_1')
# get object
taskname_2 = Taskname.objects.create(taskname_name = 'task_edit_post_test_2')
# get object
taskpriority = Taskpriority.objects.get(taskpriority_name = 'prio_1')
# get object
taskstatus = Taskstatus.objects.get(taskstatus_name = 'taskstatus_1')
# create object
task_1 = Task.objects.create(
taskname = taskname_1,
taskpriority = taskpriority,
taskstatus = taskstatus,
task_created_by_user_id = test_user,
task_modified_by_user_id = test_user,
)
# create post data
data_dict = {
'taskname': taskname_2.taskname_id,
'taskpriority': taskpriority.taskpriority_id,
'taskstatus': taskstatus.taskstatus_id,
'task_modified_by_user_id': test_user.id,
}
# get response
response = self.client.post('/task/' + str(task_1.task_id) + '/edit/?system=' + str(system_id), data_dict)
# create url
destination = urllib.parse.quote('/system/' + str(system_id) + '/', safe='/')
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_task_edit_post_invalid_reload(self):
""" test edit view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get object
taskname_1 = Taskname.objects.get(taskname_name = 'taskname_1')
# get object
task_id = Task.objects.get(taskname = taskname_1).task_id
# create post data
data_dict = {}
# get response
response = self.client.post('/task/' + str(task_id) + '/edit/', data_dict)
# compare
self.assertEqual(response.status_code, 200)
def test_task_edit_post_invalid_template(self):
""" test edit view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get object
taskname_1 = Taskname.objects.get(taskname_name = 'taskname_1')
# get object
task_id = Task.objects.get(taskname = taskname_1).task_id
# create post data
data_dict = {}
# get response
response = self.client.post('/task/' + str(task_id) + '/edit/', data_dict)
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/task/task_edit.html')
def test_task_edit_times_pending(self):
""" test edit view """
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get user
test_user = User.objects.get(username = 'testuser_task')
# get object
taskname_1 = Taskname.objects.create(taskname_name = 'task_edit_times_pending')
# get object
taskpriority = Taskpriority.objects.get(taskpriority_name = 'prio_1')
# get object
taskstatus_pending = Taskstatus.objects.get(taskstatus_name = '10_pending')
# get object
taskstatus_done = Taskstatus.objects.get(taskstatus_name = '30_done')
# create object
task_1 = Task.objects.create(
taskname = taskname_1,
taskpriority = taskpriority,
taskstatus = taskstatus_done,
task_created_by_user_id = test_user,
task_modified_by_user_id = test_user,
task_started_time = timezone.now(),
task_finished_time = timezone.now(),
)
# create post data
data_dict = {
'taskname': taskname_1.taskname_id,
'taskpriority': taskpriority.taskpriority_id,
'taskstatus': taskstatus_pending.taskstatus_id,
'task_modified_by_user_id': test_user.id,
}
# get response
self.client.post('/task/' + str(task_1.task_id) + '/edit/', data_dict)
# refresh object
task_1.refresh_from_db()
# compare
self.assertEqual(task_1.task_started_time, None)
self.assertEqual(task_1.task_finished_time, None)
def test_task_edit_times_working(self):
""" test edit view """
# mock timezone.now()
dt = datetime(2020, 1, 2, tzinfo=timezone.utc)
with patch.object(timezone, 'now', return_value=dt):
# login testuser
self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
# get user
test_user = User.objects.get(username = 'testuser_task')
# get object
taskname_1 = Taskname.objects.create(taskname_name = 'task_edit_times_working')
# get object
taskpriority = Taskpriority.objects.get(taskpriority_name = 'prio_1')
# get object
taskstatus_working = Taskstatus.objects.get(taskstatus_name = '20_working')
# get object
taskstatus_pending = Taskstatus.objects.get(taskstatus_name = '10_pending')
# create object
task_1 = Task.objects.create(
taskname = taskname_1,
taskpriority = taskpriority,
taskstatus = taskstatus_pending,
task_created_by_user_id = test_user,
task_modified_by_user_id = test_user,
)
# create post data
data_dict = {
'taskname': taskname_1.taskname_id,
'taskpriority': taskpriority.taskpriority_id,
'taskstatus': taskstatus_working.taskstatus_id,
'task_modified_by_user_id': test_user.id,
}
# get response
self.client.post('/task/' + str(task_1.task_id) + '/edit/', data_dict)
# refresh object
task_1.refresh_from_db()
# compare
self.assertEqual(task_1.task_started_time, timezone.now())
self.assertEqual(task_1.task_finished_time, None)
def test_task_edit_times_done(self):
    """ test edit view """
    # mock timezone.now() so both timestamps can be compared exactly
    dt = datetime(2020, 1, 2, tzinfo=timezone.utc)
    with patch.object(timezone, 'now', return_value=dt):
        # login testuser
        self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
        # get user
        test_user = User.objects.get(username = 'testuser_task')
        # create object
        taskname_1 = Taskname.objects.create(taskname_name = 'task_edit_times_done')
        # get object
        taskpriority = Taskpriority.objects.get(taskpriority_name = 'prio_1')
        # get object
        taskstatus_done = Taskstatus.objects.get(taskstatus_name = '30_done')
        # get object
        taskstatus_pending = Taskstatus.objects.get(taskstatus_name = '10_pending')
        # create a pending task
        task_1 = Task.objects.create(
            taskname = taskname_1,
            taskpriority = taskpriority,
            taskstatus = taskstatus_pending,
            task_created_by_user_id = test_user,
            task_modified_by_user_id = test_user,
        )
        # create post data moving the task straight to done
        data_dict = {
            'taskname': taskname_1.taskname_id,
            'taskpriority': taskpriority.taskpriority_id,
            'taskstatus': taskstatus_done.taskstatus_id,
            'task_modified_by_user_id': test_user.id,
        }
        # get response
        self.client.post('/task/' + str(task_1.task_id) + '/edit/', data_dict)
        # refresh object
        task_1.refresh_from_db()
        # compare - editing a pending task straight to done must set both
        # the started and the finished timestamp
        # (bugfix: the second assertion previously re-checked
        # task_started_time, leaving task_finished_time untested)
        self.assertEqual(task_1.task_started_time, timezone.now())
        self.assertEqual(task_1.task_finished_time, timezone.now())
def test_task_start_redirect(self):
    """ test task start view """
    # login testuser
    self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # resolve the task through its taskname
    task = Task.objects.get(taskname=Taskname.objects.get(taskname_name='taskname_1'))
    # without a system selected the start view bounces to the task detail page
    expected = urllib.parse.quote(f'/task/{task.task_id}/', safe='/')
    response = self.client.get(f'/task/{task.task_id}/start/', follow=True)
    self.assertRedirects(response, expected, status_code=302, target_status_code=200)
def test_task_start_system_selected(self):
    """ test task start view """
    # login testuser
    self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # resolve the involved objects
    sys_id = System.objects.get(system_name='system_1').system_id
    task = Task.objects.get(taskname=Taskname.objects.get(taskname_name='taskname_1'))
    # with a system selected the start view redirects to that system instead
    expected = urllib.parse.quote(f'/system/{sys_id}/', safe='/')
    response = self.client.get(f'/task/{task.task_id}/start/?system={sys_id}')
    self.assertRedirects(response, expected, status_code=302, target_status_code=200)
def test_task_start_status(self):
    """ test task start view """
    # login testuser
    self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # build a fresh pending task for this scenario
    user = User.objects.get(username='testuser_task')
    task = Task.objects.create(
        taskname=Taskname.objects.create(taskname_name='task_start'),
        taskpriority=Taskpriority.objects.get(taskpriority_name='prio_1'),
        taskstatus=Taskstatus.objects.get(taskstatus_name='10_pending'),
        task_created_by_user_id=user,
        task_modified_by_user_id=user,
    )
    # call the start view
    self.client.get(f'/task/{task.task_id}/start/')
    # starting a task must move its status to working
    refreshed = Task.objects.get(task_id=task.task_id)
    self.assertEqual(Taskstatus.objects.get(taskstatus_name='20_working'), refreshed.taskstatus)
def test_task_start_times(self):
    """ test task start view """
    # freeze timezone.now() for an exact timestamp comparison
    frozen_now = datetime(2020, 1, 2, tzinfo=timezone.utc)
    with patch.object(timezone, 'now', return_value=frozen_now):
        # login testuser
        self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
        # build a fresh pending task for this scenario
        user = User.objects.get(username='testuser_task')
        task = Task.objects.create(
            taskname=Taskname.objects.create(taskname_name='task_start'),
            taskpriority=Taskpriority.objects.get(taskpriority_name='prio_1'),
            taskstatus=Taskstatus.objects.get(taskstatus_name='10_pending'),
            task_created_by_user_id=user,
            task_modified_by_user_id=user,
        )
        # call the start view
        self.client.get(f'/task/{task.task_id}/start/')
        # starting sets the start time but leaves the finish time empty
        refreshed = Task.objects.get(task_id=task.task_id)
        self.assertEqual(refreshed.task_started_time, timezone.now())
        self.assertEqual(refreshed.task_finished_time, None)
def test_task_finish_redirect(self):
    """ test task finish view """
    # login testuser
    self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # resolve the task through its taskname
    task = Task.objects.get(taskname=Taskname.objects.get(taskname_name='taskname_1'))
    # without a system selected the finish view bounces to the task detail page
    expected = urllib.parse.quote(f'/task/{task.task_id}/', safe='/')
    response = self.client.get(f'/task/{task.task_id}/finish/', follow=True)
    self.assertRedirects(response, expected, status_code=302, target_status_code=200)
def test_task_finish_system_selected(self):
    """ test task finish view """
    # login testuser
    self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # resolve the involved objects
    sys_id = System.objects.get(system_name='system_1').system_id
    task = Task.objects.get(taskname=Taskname.objects.get(taskname_name='taskname_1'))
    # with a system selected the finish view redirects to that system instead
    expected = urllib.parse.quote(f'/system/{sys_id}/', safe='/')
    response = self.client.get(f'/task/{task.task_id}/finish/?system={sys_id}')
    self.assertRedirects(response, expected, status_code=302, target_status_code=200)
def test_task_finish_status(self):
    """ test task finish view """
    # login testuser
    self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # build a fresh pending task for this scenario
    user = User.objects.get(username='testuser_task')
    task = Task.objects.create(
        taskname=Taskname.objects.create(taskname_name='task_finish'),
        taskpriority=Taskpriority.objects.get(taskpriority_name='prio_1'),
        taskstatus=Taskstatus.objects.get(taskstatus_name='10_pending'),
        task_created_by_user_id=user,
        task_modified_by_user_id=user,
    )
    # call the finish view
    self.client.get(f'/task/{task.task_id}/finish/')
    # finishing a task must move its status to done
    refreshed = Task.objects.get(task_id=task.task_id)
    self.assertEqual(Taskstatus.objects.get(taskstatus_name='30_done'), refreshed.taskstatus)
def test_task_finish_times(self):
    """ test task finish view """
    # freeze timezone.now() for an exact timestamp comparison
    frozen_now = datetime(2020, 1, 2, tzinfo=timezone.utc)
    with patch.object(timezone, 'now', return_value=frozen_now):
        # login testuser
        self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
        # build a fresh pending task for this scenario
        user = User.objects.get(username='testuser_task')
        task = Task.objects.create(
            taskname=Taskname.objects.create(taskname_name='task_finish'),
            taskpriority=Taskpriority.objects.get(taskpriority_name='prio_1'),
            taskstatus=Taskstatus.objects.get(taskstatus_name='10_pending'),
            task_created_by_user_id=user,
            task_modified_by_user_id=user,
        )
        # call the finish view
        self.client.get(f'/task/{task.task_id}/finish/')
        # finishing a pending task sets both start and finish times
        refreshed = Task.objects.get(task_id=task.task_id)
        self.assertEqual(refreshed.task_started_time, timezone.now())
        self.assertEqual(refreshed.task_finished_time, timezone.now())
def test_task_renew_redirect(self):
    """ test task renew view """
    # login testuser
    self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # resolve the task through its taskname
    task = Task.objects.get(taskname=Taskname.objects.get(taskname_name='taskname_1'))
    # without a system selected the renew view bounces to the task detail page
    expected = urllib.parse.quote(f'/task/{task.task_id}/', safe='/')
    response = self.client.get(f'/task/{task.task_id}/renew/', follow=True)
    self.assertRedirects(response, expected, status_code=302, target_status_code=200)
def test_task_renew_system_selected(self):
    """ test task renew view """
    # login testuser
    self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # resolve the involved objects
    sys_id = System.objects.get(system_name='system_1').system_id
    task = Task.objects.get(taskname=Taskname.objects.get(taskname_name='taskname_1'))
    # with a system selected the renew view redirects to that system instead
    expected = urllib.parse.quote(f'/system/{sys_id}/', safe='/')
    response = self.client.get(f'/task/{task.task_id}/renew/?system={sys_id}')
    self.assertRedirects(response, expected, status_code=302, target_status_code=200)
def test_task_renew_status(self):
    """ test task renew view """
    # login testuser
    self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # build a task that is already done
    user = User.objects.get(username='testuser_task')
    task = Task.objects.create(
        taskname=Taskname.objects.create(taskname_name='task_renew'),
        taskpriority=Taskpriority.objects.get(taskpriority_name='prio_1'),
        taskstatus=Taskstatus.objects.get(taskstatus_name='30_done'),
        task_created_by_user_id=user,
        task_modified_by_user_id=user,
    )
    # call the renew view
    self.client.get(f'/task/{task.task_id}/renew/')
    # renewing must reset the status to pending
    refreshed = Task.objects.get(task_id=task.task_id)
    self.assertEqual(Taskstatus.objects.get(taskstatus_name='10_pending'), refreshed.taskstatus)
def test_task_renew_user(self):
    """ test task renew view """
    # login testuser
    self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # build a done task that is assigned to the test user
    user = User.objects.get(username='testuser_task')
    task = Task.objects.create(
        taskname=Taskname.objects.create(taskname_name='task_renew'),
        taskpriority=Taskpriority.objects.get(taskpriority_name='prio_1'),
        taskstatus=Taskstatus.objects.get(taskstatus_name='30_done'),
        task_created_by_user_id=user,
        task_modified_by_user_id=user,
        task_assigned_to_user_id=user,
    )
    # call the renew view
    self.client.get(f'/task/{task.task_id}/renew/')
    # renewing must clear the assigned user
    refreshed = Task.objects.get(task_id=task.task_id)
    self.assertEqual(None, refreshed.task_assigned_to_user_id)
def test_task_renew_times(self):
    """ test task renew view """
    # login testuser
    self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # build a done task that already carries a start time
    user = User.objects.get(username='testuser_task')
    task = Task.objects.create(
        taskname=Taskname.objects.create(taskname_name='task_renew'),
        taskpriority=Taskpriority.objects.get(taskpriority_name='prio_1'),
        taskstatus=Taskstatus.objects.get(taskstatus_name='30_done'),
        task_created_by_user_id=user,
        task_modified_by_user_id=user,
        task_started_time=timezone.now(),
    )
    # call the renew view
    self.client.get(f'/task/{task.task_id}/renew/')
    # renewing must clear both timestamps
    refreshed = Task.objects.get(task_id=task.task_id)
    self.assertEqual(refreshed.task_started_time, None)
    self.assertEqual(refreshed.task_finished_time, None)
def test_task_set_user_redirect(self):
    """ test task set_user view """
    # login testuser
    self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # resolve the task through its taskname
    task = Task.objects.get(taskname=Taskname.objects.get(taskname_name='taskname_1'))
    # without a system selected the set_user view bounces to the task detail page
    expected = urllib.parse.quote(f'/task/{task.task_id}/', safe='/')
    response = self.client.get(f'/task/{task.task_id}/set_user/', follow=True)
    self.assertRedirects(response, expected, status_code=302, target_status_code=200)
def test_task_set_user_system_selected(self):
    """ test task set_user view """
    # login testuser
    self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # resolve the involved objects
    sys_id = System.objects.get(system_name='system_1').system_id
    task = Task.objects.get(taskname=Taskname.objects.get(taskname_name='taskname_1'))
    # with a system selected the set_user view redirects to that system instead
    expected = urllib.parse.quote(f'/system/{sys_id}/', safe='/')
    response = self.client.get(f'/task/{task.task_id}/set_user/?system={sys_id}')
    self.assertRedirects(response, expected, status_code=302, target_status_code=200)
def test_task_set_user_user(self):
    """ test task set_user view """
    # login testuser
    self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # build a fresh pending task without an assigned user
    user = User.objects.get(username='testuser_task')
    task = Task.objects.create(
        taskname=Taskname.objects.create(taskname_name='task_set_user'),
        taskpriority=Taskpriority.objects.get(taskpriority_name='prio_1'),
        taskstatus=Taskstatus.objects.get(taskstatus_name='10_pending'),
        task_created_by_user_id=user,
        task_modified_by_user_id=user,
    )
    # call the set_user view
    self.client.get(f'/task/{task.task_id}/set_user/')
    # the logged-in user must now be assigned to the task
    refreshed = Task.objects.get(task_id=task.task_id)
    self.assertEqual(user, refreshed.task_assigned_to_user_id)
def test_task_unset_user_redirect(self):
    """ test task unset_user view """
    # login testuser
    self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # resolve the task through its taskname
    task = Task.objects.get(taskname=Taskname.objects.get(taskname_name='taskname_1'))
    # without a system selected the unset_user view bounces to the task detail page
    expected = urllib.parse.quote(f'/task/{task.task_id}/', safe='/')
    response = self.client.get(f'/task/{task.task_id}/unset_user/', follow=True)
    self.assertRedirects(response, expected, status_code=302, target_status_code=200)
def test_task_unset_user_system_selected(self):
    """ test task unset_user view """
    # login testuser
    self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # resolve the involved objects
    sys_id = System.objects.get(system_name='system_1').system_id
    task = Task.objects.get(taskname=Taskname.objects.get(taskname_name='taskname_1'))
    # with a system selected the unset_user view redirects to that system instead
    expected = urllib.parse.quote(f'/system/{sys_id}/', safe='/')
    response = self.client.get(f'/task/{task.task_id}/unset_user/?system={sys_id}')
    self.assertRedirects(response, expected, status_code=302, target_status_code=200)
def test_task_unset_user_user(self):
    """ test task unset_user view """
    # login testuser
    self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')
    # build a pending task that is assigned to the test user
    user = User.objects.get(username='testuser_task')
    task = Task.objects.create(
        taskname=Taskname.objects.create(taskname_name='task_unset_user'),
        taskpriority=Taskpriority.objects.get(taskpriority_name='prio_1'),
        taskstatus=Taskstatus.objects.get(taskstatus_name='10_pending'),
        task_created_by_user_id=user,
        task_modified_by_user_id=user,
        task_assigned_to_user_id=user,
    )
    # call the unset_user view
    self.client.get(f'/task/{task.task_id}/unset_user/')
    # the assigned user must now be cleared
    refreshed = Task.objects.get(task_id=task.task_id)
    self.assertEqual(None, refreshed.task_assigned_to_user_id)
| 41.3281
| 114
| 0.631087
| 5,912
| 52,652
| 5.351827
| 0.020129
| 0.042035
| 0.049305
| 0.052212
| 0.95866
| 0.941182
| 0.926296
| 0.91024
| 0.889633
| 0.880689
| 0
| 0.023025
| 0.262554
| 52,652
| 1,273
| 115
| 41.360566
| 0.791851
| 0.114867
| 0
| 0.683969
| 0
| 0
| 0.119422
| 0.01847
| 0
| 0
| 0
| 0
| 0.11145
| 1
| 0.099237
| false
| 0.090076
| 0.010687
| 0
| 0.11145
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
03e88ffcafd6342bee9f1caa48719a2fca701701
| 50
|
py
|
Python
|
level0/question85.py
|
kevin00000000/Python-programming-exercises
|
87546906d817263ae7ddbd0276f0bb36e0d63c41
|
[
"MIT"
] | null | null | null |
level0/question85.py
|
kevin00000000/Python-programming-exercises
|
87546906d817263ae7ddbd0276f0bb36e0d63c41
|
[
"MIT"
] | null | null | null |
level0/question85.py
|
kevin00000000/Python-programming-exercises
|
87546906d817263ae7ddbd0276f0bb36e0d63c41
|
[
"MIT"
] | null | null | null |
# Keep only the odd numbers from the sample list and print them.
odd_values = [value for value in [5, 6, 77, 45, 22, 12, 24] if value % 2 != 0]
print(odd_values)
| 50
| 50
| 0.58
| 16
| 50
| 1.8125
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.318182
| 0.12
| 50
| 1
| 50
| 50
| 0.340909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
03f25358349a17779501c9c6fde9408aca823fd1
| 62,406
|
py
|
Python
|
tests/mocks/examples_ttl.py
|
Informasjonsforvaltning/fdk-model-publisher
|
f9f2434436fbbc4e66a9747d8fb2b7f07bb76533
|
[
"Apache-2.0"
] | null | null | null |
tests/mocks/examples_ttl.py
|
Informasjonsforvaltning/fdk-model-publisher
|
f9f2434436fbbc4e66a9747d8fb2b7f07bb76533
|
[
"Apache-2.0"
] | 34
|
2020-10-21T05:54:34.000Z
|
2022-02-04T12:47:55.000Z
|
tests/mocks/examples_ttl.py
|
Informasjonsforvaltning/fdk-model-publisher
|
f9f2434436fbbc4e66a9747d8fb2b7f07bb76533
|
[
"Apache-2.0"
] | null | null | null |
"""Example TTL output."""
# flake8: noqa
ex_1_ttl = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix modelldcatno: <https://data.norge.no/vocabulary/modelldcatno#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5> a modelldcatno:InformationModel ;
dct:hasFormat <http://uri.com> ;
dct:title "Informasjonsmodell - datatjeneste eksempler"@nb ;
dct:type modelldcatno:physicalModel ;
modelldcatno:containsModelElement <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Eiendom> .
<http://uri.com> a foaf:Document ;
dct:format "http://publications.europa.eu/resource/authority/file-type/JSON"^^dct:MediaType .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Eiendom> a modelldcatno:ObjectType ;
dct:title "Eiendom"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#erstatter>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#erstattetav>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#id>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#nummer>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#type> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#erstatter> a modelldcatno:Attribute ;
dct:title "erstatter"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#erstattetav> a modelldcatno:Attribute ;
dct:title "erstattetav"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#id> a modelldcatno:Attribute ;
dct:title "id"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#nummer> a modelldcatno:Attribute ;
dct:title "nummer"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#type> a modelldcatno:Attribute ;
dct:title "type"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> a modelldcatno:SimpleType ;
dct:title "string"@en ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#stringschema> .
"""
ex_2_ttl = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix modelldcatno: <https://data.norge.no/vocabulary/modelldcatno#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5> a modelldcatno:InformationModel ;
dct:hasFormat <http://uri.com> ;
dct:title "Informasjonsmodell - datatjeneste eksempler"@nb ;
dct:type modelldcatno:physicalModel ;
modelldcatno:containsModelElement <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Eiendom>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#EiendomResultat> .
<http://uri.com> a foaf:Document ;
dct:format "http://publications.europa.eu/resource/authority/file-type/JSON"^^dct:MediaType .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#EiendomResultat> a modelldcatno:ObjectType ;
dct:title "EiendomResultat"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/EiendomResultat#code>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/EiendomResultat#data>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/EiendomResultat#message> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#int32> a modelldcatno:SimpleType ;
dct:title "int32"@en ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#integerschema> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#erstatter> a modelldcatno:Attribute ;
dct:title "erstatter"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#erstattetav> a modelldcatno:Attribute ;
dct:title "erstattetav"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#id> a modelldcatno:Attribute ;
dct:title "id"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#nummer> a modelldcatno:Attribute ;
dct:title "nummer"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#type> a modelldcatno:Attribute ;
dct:title "type"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/EiendomResultat#code> a modelldcatno:Attribute ;
dct:title "code"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#int32> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/EiendomResultat#data> a modelldcatno:Role ;
dct:title "data"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasObjectType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Eiendom> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/EiendomResultat#message> a modelldcatno:Attribute ;
dct:title "message"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Eiendom> a modelldcatno:ObjectType ;
dct:title "Eiendom"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#erstatter>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#erstattetav>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#id>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#nummer>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#type> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> a modelldcatno:SimpleType ;
dct:title "string"@en ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#stringschema> .
"""
# Turtle (ModellDCAT-AP-NO) fixture: an InformationModel containing the object
# types Eiendom, Kommune and Søk, where Søk#data is a modelldcatno:Choice over
# Eiendom/Kommune. Payload is runtime data — do not reformat or re-indent.
# NOTE(review): presumably the expected serialization for test example 3 —
# confirm against the test that consumes it.
ex_3_ttl = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix modelldcatno: <https://data.norge.no/vocabulary/modelldcatno#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5> a modelldcatno:InformationModel ;
dct:hasFormat <http://uri.com> ;
dct:title "Informasjonsmodell - datatjeneste eksempler"@nb ;
dct:type modelldcatno:physicalModel ;
modelldcatno:containsModelElement <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Eiendom>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Kommune>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Søk> .
<http://uri.com> a foaf:Document ;
dct:format "http://publications.europa.eu/resource/authority/file-type/JSON"^^dct:MediaType .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Søk> a modelldcatno:ObjectType ;
dct:title "Søk"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Søk#code>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Søk#data>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Søk#message> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#date> a modelldcatno:SimpleType ;
dct:title "date"@en ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#stringschema> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#int32> a modelldcatno:SimpleType ;
dct:title "int32"@en ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#integerschema> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#erstatter> a modelldcatno:Attribute ;
dct:title "erstatter"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#erstattetav> a modelldcatno:Attribute ;
dct:title "erstattetav"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#id> a modelldcatno:Attribute ;
dct:title "id"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#nummer> a modelldcatno:Attribute ;
dct:title "nummer"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#type> a modelldcatno:Attribute ;
dct:title "type"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#fylke> a modelldcatno:Attribute ;
dct:title "fylke"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#gyldigtil> a modelldcatno:Attribute ;
dct:title "gyldigtil"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#date> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#id> a modelldcatno:Attribute ;
dct:title "id"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#navn> a modelldcatno:Attribute ;
dct:title "navn"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#type> a modelldcatno:Attribute ;
dct:title "type"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Søk#code> a modelldcatno:Attribute ;
dct:title "code"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#int32> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Søk#data> a modelldcatno:Choice ;
dct:title "data"@en ;
xsd:maxOccurs "*" ;
xsd:minOccurs "1" ;
modelldcatno:hasSome <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Eiendom>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Kommune> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Søk#message> a modelldcatno:Attribute ;
dct:title "message"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Eiendom> a modelldcatno:ObjectType ;
dct:title "Eiendom"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#erstatter>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#erstattetav>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#id>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#nummer>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#type> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Kommune> a modelldcatno:ObjectType ;
dct:title "Kommune"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#fylke>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#gyldigtil>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#id>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#navn>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#type> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> a modelldcatno:SimpleType ;
dct:title "string"@en ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#stringschema> .
"""
# Turtle (ModellDCAT-AP-NO) fixture: extends the model with EiendomResultat and
# KommuneResultat wrappers; exercises Role (hasObjectType), Composition
# (contains) and nested inline object types (KommuneResultat/data/...).
# Payload is runtime data — do not reformat or re-indent.
# NOTE(review): presumably the expected serialization for test example 4 —
# confirm against the test that consumes it.
ex_4_ttl = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix modelldcatno: <https://data.norge.no/vocabulary/modelldcatno#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5> a modelldcatno:InformationModel ;
dct:hasFormat <http://uri.com> ;
dct:title "Informasjonsmodell - datatjeneste eksempler"@nb ;
dct:type modelldcatno:physicalModel ;
modelldcatno:containsModelElement <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Eiendom>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#EiendomResultat>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Kommune>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#KommuneResultat>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Søk> .
<http://uri.com> a foaf:Document ;
dct:format "http://publications.europa.eu/resource/authority/file-type/JSON"^^dct:MediaType .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#EiendomResultat> a modelldcatno:ObjectType ;
dct:title "EiendomResultat"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/EiendomResultat#code>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/EiendomResultat#data>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/EiendomResultat#message> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#KommuneResultat> a modelldcatno:ObjectType ;
dct:title "KommuneResultat"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/KommuneResultat#code>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/KommuneResultat#data>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/KommuneResultat#message> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Søk> a modelldcatno:ObjectType ;
dct:title "Søk"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Søk#code>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Søk#data>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Søk#message> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#date> a modelldcatno:SimpleType ;
dct:title "date"@en ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#stringschema> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#integer> a modelldcatno:SimpleType ;
dct:title "integer"@en ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#integerschema> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#erstatter> a modelldcatno:Attribute ;
dct:title "erstatter"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#erstattetav> a modelldcatno:Attribute ;
dct:title "erstattetav"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#id> a modelldcatno:Attribute ;
dct:title "id"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#nummer> a modelldcatno:Attribute ;
dct:title "nummer"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#type> a modelldcatno:Attribute ;
dct:title "type"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/EiendomResultat#code> a modelldcatno:Attribute ;
dct:title "code"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#int32> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/EiendomResultat#data> a modelldcatno:Role ;
dct:title "data"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasObjectType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Eiendom> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/EiendomResultat#message> a modelldcatno:Attribute ;
dct:title "message"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#fylke> a modelldcatno:Attribute ;
dct:title "fylke"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#gyldigtil> a modelldcatno:Attribute ;
dct:title "gyldigtil"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#date> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#id> a modelldcatno:Attribute ;
dct:title "id"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#navn> a modelldcatno:Attribute ;
dct:title "navn"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#type> a modelldcatno:Attribute ;
dct:title "type"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/KommuneResultat#code> a modelldcatno:Attribute ;
dct:title "code"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#int32> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/KommuneResultat#data> a modelldcatno:Composition ;
dct:title "data"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:contains <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/KommuneResultat/data#data> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/KommuneResultat#message> a modelldcatno:Attribute ;
dct:title "message"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/KommuneResultat/data#data> a modelldcatno:ObjectType ;
dct:title "data"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/KommuneResultat/data/data#erstatter>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/KommuneResultat/data/data#erstattetav>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/KommuneResultat/data/data#kommune> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/KommuneResultat/data/data#erstatter> a modelldcatno:Role ;
dct:title "erstatter"@en ;
xsd:maxOccurs "*" ;
xsd:minOccurs "0" ;
modelldcatno:hasObjectType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Kommune> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/KommuneResultat/data/data#erstattetav> a modelldcatno:Role ;
dct:title "erstattetav"@en ;
xsd:maxOccurs "*" ;
xsd:minOccurs "0" ;
modelldcatno:hasObjectType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/KommuneResultat/data/data/erstattetav#erstattetav> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/KommuneResultat/data/data#kommune> a modelldcatno:Role ;
dct:title "kommune"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasObjectType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Kommune> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/KommuneResultat/data/data/erstattetav#erstattetav> a modelldcatno:ObjectType ;
dct:title "erstattetav"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/KommuneResultat/data/data/erstattetav/erstattetav#id> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/KommuneResultat/data/data/erstattetav/erstattetav#id> a modelldcatno:Attribute ;
dct:title "id"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#integer> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Søk#code> a modelldcatno:Attribute ;
dct:title "code"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#int32> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Søk#data> a modelldcatno:Choice ;
dct:title "data"@en ;
xsd:maxOccurs "*" ;
xsd:minOccurs "1" ;
modelldcatno:hasSome <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Eiendom>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Kommune> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Søk#message> a modelldcatno:Attribute ;
dct:title "message"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Eiendom> a modelldcatno:ObjectType ;
dct:title "Eiendom"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#erstatter>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#erstattetav>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#id>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#nummer>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Eiendom#type> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#int32> a modelldcatno:SimpleType ;
dct:title "int32"@en ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#integerschema> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Kommune> a modelldcatno:ObjectType ;
dct:title "Kommune"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#fylke>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#gyldigtil>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#id>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#navn>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Kommune#type> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> a modelldcatno:SimpleType ;
dct:title "string"@en ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#stringschema> .
"""
# Turtle (ModellDCAT-AP-NO) fixture: a banking-domain model (Account,
# AccountStatus code list, FinancialInstitution) exercising dct:description,
# modelldcatno:CodeList + hasValueFrom, and a SimpleType with
# xsd:maxLength/minLength facets. Payload is runtime data — do not reformat.
# NOTE(review): presumably the expected serialization for test example 5 —
# confirm against the test that consumes it.
ex_5_ttl = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix modelldcatno: <https://data.norge.no/vocabulary/modelldcatno#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5> a modelldcatno:InformationModel ;
dct:hasFormat <http://uri.com> ;
dct:title "Informasjonsmodell - datatjeneste eksempler"@nb ;
dct:type modelldcatno:physicalModel ;
modelldcatno:containsModelElement <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Account>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#AccountStatus>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#FinancialInstitution> .
<http://uri.com> a foaf:Document ;
dct:format "http://publications.europa.eu/resource/authority/file-type/JSON"^^dct:MediaType .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Account> a modelldcatno:ObjectType ;
dct:description "Account: a specification of a clearly defined type of financial events"@en ;
dct:title "Account"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Account#servicer>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Account#status> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> a modelldcatno:SimpleType ;
dct:title "string"@en ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#stringschema> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Account#servicer> a modelldcatno:Composition ;
dct:description "account administrator: financial institution that manages an account on behalf of the account owner, including handling the registration of account transactions, calculating the account balance and providing information about the account"@en ;
dct:title "servicer"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:contains <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#FinancialInstitution> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Account#status> a modelldcatno:Attribute ;
dct:title "status"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> ;
modelldcatno:hasValueFrom <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#AccountStatus> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/FinancialInstitution#name> a modelldcatno:Attribute ;
dct:title "name"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/FinancialInstitution/name#name> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/FinancialInstitution/name#name> a modelldcatno:SimpleType ;
dct:title "name"@en ;
xsd:maxLength 140 ;
xsd:minLength 1 ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#stringschema> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#AccountStatus> a modelldcatno:CodeList ;
dct:title "AccountStatus"@en .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#FinancialInstitution> a modelldcatno:ObjectType ;
dct:description "financial institution: Business or other institution involved in finance and banking"@en ;
dct:title "FinancialInstitution"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/FinancialInstitution#name> .
"""
ex_6_ttl = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix modelldcatno: <https://data.norge.no/vocabulary/modelldcatno#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5> a modelldcatno:InformationModel ;
dct:hasFormat <http://uri.com> ;
dct:title "Informasjonsmodell - datatjeneste eksempler"@nb ;
dct:type modelldcatno:physicalModel ;
modelldcatno:containsModelElement <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#ObjA>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#ObjC>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#ObjD>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<http://uri.com> a foaf:Document ;
dct:format "http://publications.europa.eu/resource/authority/file-type/JSON"^^dct:MediaType .
<http://uri.com.well-known/skolem/0> a modelldcatno:Role ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasObjectType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#ObjC> .
<http://uri.com.well-known/skolem/1> a modelldcatno:Role ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasObjectType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#ObjD> .
<http://uri.com.well-known/skolem/2> a modelldcatno:Attribute ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<http://uri.com.well-known/skolem/3> a modelldcatno:Attribute ;
dct:description "test f"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#ObjA> a modelldcatno:ObjectType ;
dct:description "Root A"@en ;
dct:title "ObjA"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/ObjA#objB> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/ObjA#objB> a modelldcatno:Composition ;
dct:description "b thing"@en ;
dct:title "objB"@en ;
modelldcatno:contains <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/ObjA/objB#objB> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/ObjA/objB#objB> a modelldcatno:ObjectType ;
modelldcatno:hasProperty <http://uri.com.well-known/skolem/0>,
<http://uri.com.well-known/skolem/1>,
<http://uri.com.well-known/skolem/2>,
<http://uri.com.well-known/skolem/3> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#ObjC> a modelldcatno:ObjectType ;
dct:description "test c"@en ;
dct:title "ObjC"@en .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#ObjD> a modelldcatno:ObjectType ;
dct:description "test d"@en ;
dct:title "ObjD"@en .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> a modelldcatno:SimpleType ;
dct:title "string"@en ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#stringschema> .
"""
ex_7_ttl = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix modelldcatno: <https://data.norge.no/vocabulary/modelldcatno#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5> a modelldcatno:InformationModel ;
dct:hasFormat <http://uri.com> ;
dct:title "Informasjonsmodell - datatjeneste eksempler"@nb ;
dct:type modelldcatno:physicalModel ;
modelldcatno:containsModelElement <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Balance>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#BalanceType> .
<http://uri.com> a foaf:Document ;
dct:format "http://publications.europa.eu/resource/authority/file-type/JSON"^^dct:MediaType .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Balance> a modelldcatno:ObjectType ;
dct:description "Balance: Sum of deposits and loans in the financial account"@en ;
dct:title "Balance"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Balance#type> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> a modelldcatno:SimpleType ;
dct:title "string"@en ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#stringschema> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Balance#type> a modelldcatno:Attribute ;
dct:title "type"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> ;
modelldcatno:hasValueFrom <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#BalanceType> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#BalanceType> a modelldcatno:CodeList ;
dct:description "Balance type"@en ;
dct:title "BalanceType"@en .
"""
ex_8_ttl = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix modelldcatno: <https://data.norge.no/vocabulary/modelldcatno#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5> a modelldcatno:InformationModel ;
dct:hasFormat <http://uri.com> ;
dct:title "Informasjonsmodell - datatjeneste eksempler"@nb ;
dct:type modelldcatno:physicalModel ;
modelldcatno:containsModelElement <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Amount>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#CounterParty>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Transaction>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#TransactionReference> .
<http://uri.com> a foaf:Document ;
dct:format "http://publications.europa.eu/resource/authority/file-type/JSON"^^dct:MediaType .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Transaction> a modelldcatno:ObjectType ;
dct:description "Transaction: any posting on an account"@en ;
dct:title "Transaction"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Transaction#additionalInfo>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Transaction#amount>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Transaction#counterParties>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Transaction#merchant>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Transaction#references>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Transaction#transactionIdentifier> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> a modelldcatno:SimpleType ;
dct:title "string"@en ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#stringschema> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/CounterParty#name> a modelldcatno:Attribute ;
dct:title "name"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/CounterParty/name#name> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/CounterParty/name#name> a modelldcatno:SimpleType ;
dct:title "name"@en ;
xsd:maxLength 140 ;
xsd:minLength 1 ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#stringschema> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Transaction#additionalInfo> a modelldcatno:Attribute ;
dct:description "'Additional information on a transaction: textual description of the contents of a transaction'"@en ;
dct:title "additionalInfo"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Transaction/additionalInfo#additionalInfo> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Transaction#amount> a modelldcatno:Attribute ;
dct:title "amount"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Amount> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Transaction#counterParties> a modelldcatno:Role ;
dct:description "Counterparty: another party, the party to whom a transaction is going or coming from"@en ;
dct:title "counterParties"@en ;
xsd:maxOccurs "*" ;
xsd:minOccurs "0" ;
modelldcatno:hasObjectType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#CounterParty> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Transaction#merchant> a modelldcatno:Attribute ;
dct:description "User location: the physical location of the transaction"@en ;
dct:title "merchant"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "0" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Transaction/merchant#merchant> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Transaction#references> a modelldcatno:Role ;
dct:description "Transaction reference: unique reference associated with the transaction"@en ;
dct:title "references"@en ;
xsd:maxOccurs "*" ;
xsd:minOccurs "0" ;
modelldcatno:hasObjectType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#TransactionReference> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Transaction#transactionIdentifier> a modelldcatno:Attribute ;
dct:description "Transaction Identifier: The identifier for the transaction"@en ;
dct:title "transactionIdentifier"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Transaction/transactionIdentifier#transactionIdentifier> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Transaction/additionalInfo#additionalInfo> a modelldcatno:SimpleType ;
dct:title "additionalInfo"@en ;
xsd:maxLength 500 ;
xsd:minLength 1 ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#stringschema> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Transaction/merchant#merchant> a modelldcatno:SimpleType ;
dct:title "merchant"@en ;
xsd:maxLength 140 ;
xsd:minLength 1 ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#stringschema> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Transaction/transactionIdentifier#transactionIdentifier> a modelldcatno:SimpleType ;
dct:title "transactionIdentifier"@en ;
xsd:maxLength 35 ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#stringschema> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/TransactionReference#value> a modelldcatno:Attribute ;
dct:title "value"@en ;
xsd:maxOccurs "1" ;
xsd:minOccurs "1" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Amount> a modelldcatno:SimpleType ;
dct:title "Amount"@en ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#numberschema> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#CounterParty> a modelldcatno:ObjectType ;
dct:description "Counterparty: the party to which a transaction goes to or comes from"@en ;
dct:title "CounterParty"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/CounterParty#name> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#TransactionReference> a modelldcatno:ObjectType ;
dct:title "TransactionReference"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/TransactionReference#value> .
"""
ex_9_ttl = """@prefix dct: <http://purl.org/dc/terms/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix modelldcatno: <https://data.norge.no/vocabulary/modelldcatno#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5> a modelldcatno:InformationModel ;
dct:hasFormat <http://uri.com> ;
dct:title "Informasjonsmodell - datatjeneste eksempler"@nb ;
dct:type modelldcatno:physicalModel ;
modelldcatno:containsModelElement <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Balance>,
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<http://uri.com> a foaf:Document ;
dct:format "http://publications.europa.eu/resource/authority/file-type/JSON"^^dct:MediaType .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#Balance> a modelldcatno:ObjectType ;
dct:description "Balance: Sum of deposits and loans in the financial account"@en ;
dct:title "Balance"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Balance#validCurrencies> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Balance#validCurrencies> a modelldcatno:Role ;
dct:title "validCurrencies"@en ;
xsd:maxOccurs "*" ;
xsd:minOccurs "1" ;
modelldcatno:hasObjectType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Balance#validCurrenciesArray> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Balance#validCurrenciesArray> a modelldcatno:ObjectType ;
dct:title "validCurrenciesArray"@en ;
modelldcatno:hasProperty <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Balance/validCurrencies#items> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5/Balance/validCurrencies#items> a modelldcatno:Attribute ;
dct:title "items"@en ;
xsd:maxOccurs "*" ;
xsd:minOccurs "0" ;
modelldcatno:hasSimpleType <https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> .
<https://publishers.staging.fellesdatakatalog.digdir.no/fdk-model-publisher/catalog/e3a031fe644565af181e1049ab3e99754ddc60d5#string> a modelldcatno:SimpleType ;
dct:title "string"@en ;
modelldcatno:typeDefinitionReference <https://www.w3.org/2019/wot/json-schema#stringschema> ."""
| 75.460701
| 264
| 0.806493
| 6,148
| 62,406
| 8.184938
| 0.034483
| 0.084358
| 0.123726
| 0.219332
| 0.973629
| 0.949405
| 0.94706
| 0.942569
| 0.942529
| 0.942529
| 0
| 0.133419
| 0.073999
| 62,406
| 826
| 265
| 75.552058
| 0.737212
| 0.000529
| 0
| 0.73862
| 0
| 0.464023
| 0.997226
| 0.101272
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
ff16b6b5513f0e50420c63ca75bdfbd8cdc5edb6
| 4,279
|
py
|
Python
|
tests/test_results.py
|
ncheikh/pymarketstore
|
bfac7883af7b13db460aa509f00ccf9102ead2e3
|
[
"Apache-2.0"
] | 100
|
2018-02-02T00:43:25.000Z
|
2022-03-20T16:31:50.000Z
|
tests/test_results.py
|
ncheikh/pymarketstore
|
bfac7883af7b13db460aa509f00ccf9102ead2e3
|
[
"Apache-2.0"
] | 40
|
2018-02-07T02:35:08.000Z
|
2022-03-25T14:58:47.000Z
|
tests/test_results.py
|
ncheikh/pymarketstore
|
bfac7883af7b13db460aa509f00ccf9102ead2e3
|
[
"Apache-2.0"
] | 39
|
2018-03-10T21:20:22.000Z
|
2022-02-23T07:11:12.000Z
|
from ast import literal_eval
from pymarketstore import results
testdata1 = literal_eval(r"""
{'responses': [{'result': {'data': [b'\xf4\xe8^Z\x00\x00\x00\x000\xe9^Z\x00\x00\x00\x00l\xe9^Z\x00\x00\x00\x00\xa8\xe9^Z\x00\x00\x00\x00\xe4\xe9^Z\x00\x00\x00\x00',
b'{\x14\xaeG\x01\xf4\xc5@H\xe1z\x14\xee\xe1\xc5@\x00\x00\x00\x00\x80\xfb\xc5@\x00\x00\x00\x00\x00\x06\xc6@{\x14\xaeG\x01\xfa\xc5@',
b'{\x14\xaeG\x01\xf4\xc5@\x00\x00\x00\x00\x00\xf9\xc5@\x00\x00\x00\x00\x00\x06\xc6@\x00\x00\x00\x00\x00\x06\xc6@\x00\x00\x00\x00\x00\xfe\xc5@',
b'\x85\xebQ\xb8^\xe0\xc5@H\xe1z\x14\xee\xe1\xc5@\x00\x00\x00\x00\x80\xfb\xc5@R\xb8\x1e\x85+\xf7\xc5@{\x14\xaeG\x01\xfa\xc5@',
b'H\xe1z\x14\xee\xe1\xc5@\x00\x00\x00\x00\x00\xf9\xc5@\x00\x00\x00\x00\x00\x06\xc6@{\x14\xaeG\x01\xfa\xc5@\x85\xebQ\xb8\xfe\xfd\xc5@',
b'iL\xd2F\xbf\xaf\n@\xfe\xe6\xff49\xfd\x0b@\xe1\x9b\xe8\xeb\xe01\x10@\xaf\xe4\x11y\x1e\xce\xfa?\xd7\xd2\x8a\x0c\xfe\x00\xf9?'],
'length': 5,
'lengths': {'BTC/1Min/OHLCV:Symbol/Timeframe/AttributeGroup': 5},
'names': ['Epoch', 'Open', 'High', 'Low', 'Close', 'Volume'],
'startindex': {'BTC/1Min/OHLCV:Symbol/Timeframe/AttributeGroup': 0},
'types': ['i8', 'f8', 'f8', 'f8', 'f8', 'f8']}}],
'timezone': 'UTC',
'version': 'dev'}
""") # noqa: E501
testdata2 = literal_eval(r"""
{'responses': [{'result': {'data': [b'l\xe9^Z\x00\x00\x00\x00\xa8\xe9^Z\x00\x00\x00\x00\xe4\xe9^Z\x00\x00\x00\x00 \xea^Z\x00\x00\x00\x00\\\xea^Z\x00\x00\x00\x00l\xe9^Z\x00\x00\x00\x00\xa8\xe9^Z\x00\x00\x00\x00\xe4\xe9^Z\x00\x00\x00\x00 \xea^Z\x00\x00\x00\x00\\\xea^Z\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x00\x88\x8f@)\\\x8f\xc2\xf5\x90\x8f@\xa4p=\n\xd7\x8f\x8f@\xcd\xcc\xcc\xcc\xcc\xa8\x8f@\x00\x00\x00\x00\x00\xb0\x8f@\x00\x00\x00\x00\x80\xfb\xc5@\x00\x00\x00\x00\x00\x06\xc6@{\x14\xaeG\x01\xfa\xc5@\x00\x00\x00\x00 \x02\xc6@\x00\x00\x00\x00\x00\x06\xc6@',
b'\x00\x00\x00\x00\x00\xb0\x8f@fffff\xa2\x8f@\x00\x00\x00\x00\x00\xa8\x8f@\x00\x00\x00\x00\x00\xb0\x8f@\x00\x00\x00\x00\x00\xb0\x8f@\x00\x00\x00\x00\x00\x06\xc6@\x00\x00\x00\x00\x00\x06\xc6@\x00\x00\x00\x00\x00\x06\xc6@\x00\x00\x00\x00\x00\x06\xc6@\x00\x00\x00\x00\x00\x06\xc6@',
b'\x00\x00\x00\x00\x00\x88\x8f@{\x14\xaeG\xe1\x84\x8f@\xa4p=\n\xd7\x8f\x8f@\xf6(\\\x8f\xc2\xa7\x8f@\x00\x00\x00\x00\x00\xb0\x8f@\x00\x00\x00\x00\x80\xfb\xc5@R\xb8\x1e\x85+\xf7\xc5@\\\x8f\xc2\xf5h\xf0\xc5@\x00\x00\x00\x00 \x02\xc6@\x00\x00\x00\x00\x00\x06\xc6@',
b'\xd7\xa3p=\n\x99\x8f@\xa4p=\n\xd7\x8f\x8f@\x00\x00\x00\x00\x00\xa8\x8f@\x00\x00\x00\x00\x00\xb0\x8f@\x00\x00\x00\x00\x00\xb0\x8f@\x00\x00\x00\x00\x00\x06\xc6@{\x14\xaeG\x01\xfa\xc5@\x85\xebQ\xb8\x1e\x02\xc6@\x00\x00\x00\x00\x00\x06\xc6@\x00\x00\x00\x00\x00\x06\xc6@',
b'f\r\x83\x9erg8@j\xa8\xcd\x0f\x8e\xdf<@\x7f\xc7\xa6Ku\xbcP@AG\xe5\x05\xdc\x1aU@\xdc\xb1d\xd0\x012+@\xe1\x9b\xe8\xeb\xe01\x10@\xaf\xe4\x11y\x1e\xce\xfa?\xa2\x9a\xa3\xd8\x1bb\x19@s!\xc1\x1a\x88\xa9/@\xbaI\x0c\x02+\x87\xf4?'],
'length': 10,
'lengths': {'BTC/1Min/OHLCV:Symbol/Timeframe/AttributeGroup': 5,
'ETH/1Min/OHLCV:Symbol/Timeframe/AttributeGroup': 5},
'names': ['Epoch', 'Open', 'High', 'Low', 'Close', 'Volume'],
'startindex': {'BTC/1Min/OHLCV:Symbol/Timeframe/AttributeGroup': 5,
'ETH/1Min/OHLCV:Symbol/Timeframe/AttributeGroup': 0},
'types': ['i8', 'f8', 'f8', 'f8', 'f8', 'f8']}}],
'timezone': 'America/New_York',
'version': 'dev'}
""") # noqa: E501
def test_results():
reply = results.QueryReply.from_response(testdata1)
assert reply.timezone == 'UTC'
assert str(
reply) == """QueryReply(QueryResult(DataSet(key=BTC/1Min/OHLCV, shape=(5,), dtype=[('Epoch', '<i8'), ('Open', '<f8'), ('High', '<f8'), ('Low', '<f8'), ('Close', '<f8'), ('Volume', '<f8')])))""" # noqa
assert reply.first().timezone == 'UTC'
assert reply.first().symbol == 'BTC'
assert reply.first().timeframe == '1Min'
assert reply.first().attribute_group == 'OHLCV'
assert reply.first().df().shape == (5, 5)
assert list(reply.by_symbols().keys()) == ['BTC']
assert reply.keys() == ['BTC/1Min/OHLCV']
assert reply.symbols() == ['BTC']
assert reply.timeframes() == ['1Min']
reply = results.QueryReply.from_response(testdata2)
assert str(reply.first().df().index.tzinfo) == 'America/New_York'
| 76.410714
| 284
| 0.65132
| 784
| 4,279
| 3.542092
| 0.215561
| 0.380266
| 0.408354
| 0.328412
| 0.674469
| 0.648902
| 0.615412
| 0.590205
| 0.574001
| 0.57292
| 0
| 0.20687
| 0.081561
| 4,279
| 55
| 285
| 77.8
| 0.499746
| 0.006076
| 0
| 0.16
| 0
| 0.34
| 0.819016
| 0.632384
| 0
| 0
| 0
| 0
| 0.24
| 1
| 0.02
| false
| 0
| 0.04
| 0
| 0.06
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
205e027cb9e1138da201beeb021c1d67ff383444
| 1,418
|
py
|
Python
|
tests/tracer/test_constants.py
|
p7g/dd-trace-py
|
141ac0ab6e9962e3b3bafc9de172076075289a19
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/tracer/test_constants.py
|
p7g/dd-trace-py
|
141ac0ab6e9962e3b3bafc9de172076075289a19
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/tracer/test_constants.py
|
p7g/dd-trace-py
|
141ac0ab6e9962e3b3bafc9de172076075289a19
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
import warnings
import pytest
def test_deprecated():
import ddtrace
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
assert ddtrace.constants.FILTERS_KEY
(w,) = ws
assert issubclass(w.category, DeprecationWarning)
assert "ddtrace.constants.FILTERS_KEY is deprecated and will be removed in version '1.0.0'" == str(w.message)
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
assert ddtrace.constants.NUMERIC_TAGS
(w,) = ws
assert issubclass(w.category, DeprecationWarning)
assert "ddtrace.constants.NUMERIC_TAGS is deprecated and will be removed in version '1.0.0'" == str(w.message)
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
assert ddtrace.constants.LOG_SPAN_KEY
(w,) = ws
assert issubclass(w.category, DeprecationWarning)
assert "ddtrace.constants.LOG_SPAN_KEY is deprecated and will be removed in version '1.0.0'" == str(w.message)
def test_not_deprecated():
import ddtrace
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
assert ddtrace.constants.ENV_KEY
assert len(ws) == 0
def test_invalid():
with pytest.raises(ImportError):
from ddtrace.constants import INVALID_CONSTANT # noqa
| 28.36
| 118
| 0.689704
| 177
| 1,418
| 5.423729
| 0.265537
| 0.133333
| 0.160417
| 0.104167
| 0.846875
| 0.809375
| 0.794792
| 0.794792
| 0.794792
| 0.794792
| 0
| 0.009025
| 0.218618
| 1,418
| 49
| 119
| 28.938776
| 0.857401
| 0.002821
| 0
| 0.516129
| 0
| 0
| 0.192635
| 0.063031
| 0
| 0
| 0
| 0
| 0.354839
| 1
| 0.096774
| true
| 0
| 0.193548
| 0
| 0.290323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2072879e9ec6f7bad13c73ad9346edf525e2d260
| 4,918
|
py
|
Python
|
sitri/contrib/json.py
|
gitter-badger/sitri
|
fd2e7f2dfede0f3565b0dc9f9b8ae7a1bc94f7e2
|
[
"MIT"
] | null | null | null |
sitri/contrib/json.py
|
gitter-badger/sitri
|
fd2e7f2dfede0f3565b0dc9f9b8ae7a1bc94f7e2
|
[
"MIT"
] | null | null | null |
sitri/contrib/json.py
|
gitter-badger/sitri
|
fd2e7f2dfede0f3565b0dc9f9b8ae7a1bc94f7e2
|
[
"MIT"
] | null | null | null |
import json
import os
import typing
from ..config.providers import ConfigProvider
from ..credentials.providers import CredentialProvider
class JsonConfigProvider(ConfigProvider):
"""Config provider for JSON"""
provider_code = "json"
def __init__(self, json_path: str = "./data.yaml", json_data: str = None, default_separator: str = "."):
"""
:param json_path: path to json file
:param json_data: data of json
:param default_separator: default value separator for path-mode
"""
if not json_data:
self._json = json.load(open(os.path.abspath(json_path)))
else:
self._json = json.loads(json_data)
self.separator = default_separator
def _get_by_path(self, path: str, separator: str) -> typing.Any:
"""Retrieve value from a dictionary using a list of keys.
:param path: string with separated keys
"""
dict_local = self._json.copy()
keys = path.split(separator)
for key in keys:
try:
dict_local = dict_local[int(key)] if key.isdigit() else dict_local[key]
except Exception:
if key not in dict_local:
return None
dict_local = dict_local[key]
return dict_local
def _get_by_key(self, key: str) -> typing.Any:
"""Retrieve value from a dictionary using a key.
:param key: key from json
"""
if key in self._json:
return self._json[key]
else:
return None
def get(self, key: str, path_mode: bool = False, separator: str = None) -> typing.Optional[typing.Any]:
"""Get value from json
:param key: key or path for search
:param path_mode: boolean mode switcher
:param separator: separator for path keys in path mode
"""
separator = separator if separator else self.separator
if path_mode:
return self._get_by_path(key, separator=separator)
return self._get_by_key(key)
def keys(self, path_mode: bool = False, separator: str = None) -> typing.List[str]:
"""Keys in json
:param path_mode: [future] path mode for keys list
:param separator: [future] separators for keys in path mode
"""
# TODO: implemented path-mode for keys list
if not path_mode:
return self._json.keys()
else:
raise NotImplementedError("Path-mode not implemented!")
class JsonCredentialProvider(CredentialProvider):
"""Credential provider for JSON"""
provider_code = "json"
def __init__(self, json_path: str = "./data.yaml", json_data: str = None, default_separator: str = "."):
"""
:param json_path: path to json file
:param json_data: data of json
:param default_separator: default value separator for path-mode
"""
if not json_data:
self._json = json.load(open(os.path.abspath(json_path)))
else:
self._json = json.loads(json_data)
self.separator = default_separator
def _get_by_path(self, path: str, separator: str) -> typing.Any:
"""Retrieve value from a dictionary using a list of keys.
:param path: string with separated keys
"""
dict_local = self._json.copy()
keys = path.split(separator)
for key in keys:
try:
dict_local = dict_local[int(key)] if key.isdigit() else dict_local[key]
except Exception:
if key not in dict_local:
return None
dict_local = dict_local[key]
return dict_local
def _get_by_key(self, key: str) -> typing.Any:
"""Retrieve value from a dictionary using a key.
:param key: key from json
"""
if key in self._json:
return self._json[key]
else:
return None
def get(self, key: str, path_mode: bool = False, separator: str = None) -> typing.Optional[typing.Any]:
"""Get value from json
:param key: key or path for search
:param path_mode: boolean mode switcher
:param separator: separator for path keys in path mode
"""
separator = separator if separator else self.separator
if path_mode:
return self._get_by_path(key, separator=separator)
return self._get_by_key(key)
def keys(self, path_mode: bool = False, separator: str = None) -> typing.List[str]:
"""Keys in json
:param path_mode: [future] path mode for keys list
:param separator: [future] separators for keys in path mode
"""
# TODO: implemented path-mode for keys list
if not path_mode:
return self._json.keys()
else:
raise NotImplementedError("Path-mode not implemented!")
| 30.171779
| 108
| 0.602074
| 616
| 4,918
| 4.654221
| 0.125
| 0.066969
| 0.022323
| 0.027904
| 0.925009
| 0.925009
| 0.925009
| 0.925009
| 0.925009
| 0.925009
| 0
| 0
| 0.308865
| 4,918
| 162
| 109
| 30.358025
| 0.843483
| 0.262302
| 0
| 0.90411
| 0
| 0
| 0.025362
| 0
| 0
| 0
| 0
| 0.012346
| 0
| 1
| 0.136986
| false
| 0
| 0.068493
| 0
| 0.452055
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
208cb6d2a8a812e87586247f183508061944b498
| 165
|
py
|
Python
|
inflearn_machine_learning/lab_asssigment/2_lab_build_matrix/windows/build_matrix.py
|
Junhojuno/TIL
|
c252b62b94dc519ccd528c2cd8b638e85adee89c
|
[
"MIT"
] | null | null | null |
inflearn_machine_learning/lab_asssigment/2_lab_build_matrix/windows/build_matrix.py
|
Junhojuno/TIL
|
c252b62b94dc519ccd528c2cd8b638e85adee89c
|
[
"MIT"
] | null | null | null |
inflearn_machine_learning/lab_asssigment/2_lab_build_matrix/windows/build_matrix.py
|
Junhojuno/TIL
|
c252b62b94dc519ccd528c2cd8b638e85adee89c
|
[
"MIT"
] | 3
|
2018-05-23T03:33:41.000Z
|
2018-07-09T14:34:15.000Z
|
import numpy as np
import pandas as pd
def get_rating_matrix(filename, dtype=np.float32):
    """Build a rating matrix from the data file at ``filename``.

    Lab-assignment stub — not yet implemented.

    :param filename: path to the source data file — presumably CSV; confirm
        against the assignment spec
    :param dtype: numpy dtype of the matrix to be returned (default np.float32)
    """
    pass
def get_frequent_matrix(filename, dtype=np.float32):
    """Build a frequency matrix from the data file at ``filename``.

    Lab-assignment stub — not yet implemented.

    :param filename: path to the source data file — presumably CSV; confirm
        against the assignment spec
    :param dtype: numpy dtype of the matrix to be returned (default np.float32)
    """
    pass
| 15
| 52
| 0.757576
| 26
| 165
| 4.653846
| 0.576923
| 0.099174
| 0.31405
| 0.347107
| 0.528926
| 0.528926
| 0
| 0
| 0
| 0
| 0
| 0.029197
| 0.169697
| 165
| 10
| 53
| 16.5
| 0.854015
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
20bcb9f04c473c8a6de9169d12767d2ce9ad07c1
| 191
|
py
|
Python
|
packages/service-integration/tests/test_pytest_plugin.py
|
colinRawlings/osparc-simcore
|
bf2f18d5bc1e574d5f4c238d08ad15156184c310
|
[
"MIT"
] | 25
|
2018-04-13T12:44:12.000Z
|
2022-03-12T15:01:17.000Z
|
packages/service-integration/tests/test_pytest_plugin.py
|
colinRawlings/osparc-simcore
|
bf2f18d5bc1e574d5f4c238d08ad15156184c310
|
[
"MIT"
] | 2,553
|
2018-01-18T17:11:55.000Z
|
2022-03-31T16:26:40.000Z
|
packages/service-integration/tests/test_pytest_plugin.py
|
mrnicegyu11/osparc-simcore
|
b6fa6c245dbfbc18cc74a387111a52de9b05d1f4
|
[
"MIT"
] | 20
|
2018-01-18T19:45:33.000Z
|
2022-03-29T07:08:47.000Z
|
# pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
# TODO: using https://docs.pytest.org/en/stable/writing_plugins.html#testing-plugins
| 27.285714
| 84
| 0.801047
| 26
| 191
| 5.846154
| 0.769231
| 0.256579
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.062827
| 191
| 6
| 85
| 31.833333
| 0.849162
| 0.937173
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.166667
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
20c3402a92fd35b8c5f99a9925234153e9b1d7f0
| 16,721
|
bzl
|
Python
|
3rdparty/target_file.bzl
|
ianoc/skeleton
|
8b5a6eb4529f5f7acef7cb13dcf407b6653bf9f4
|
[
"Apache-2.0"
] | null | null | null |
3rdparty/target_file.bzl
|
ianoc/skeleton
|
8b5a6eb4529f5f7acef7cb13dcf407b6653bf9f4
|
[
"Apache-2.0"
] | 1
|
2020-03-04T08:47:04.000Z
|
2020-03-04T16:20:30.000Z
|
3rdparty/target_file.bzl
|
ianoc/skeleton
|
8b5a6eb4529f5f7acef7cb13dcf407b6653bf9f4
|
[
"Apache-2.0"
] | 1
|
2021-02-09T04:40:32.000Z
|
2021-02-09T04:40:32.000Z
|
# Do not edit. bazel-deps autogenerates this file from.
# BUILD-file snippet for a java_library target; the {name}, {exports},
# {runtime_deps} and {visibility} placeholders are filled via .format()
# in the repository rule implementation below.
_JAVA_LIBRARY_TEMPLATE = """
java_library(
name = "{name}",
exports = [
{exports}
],
runtime_deps = [
{runtime_deps}
],
visibility = [
"{visibility}"
]
)\n"""

# BUILD-file snippet for a scala_import target; additionally takes a
# {jars} placeholder for the imported jar labels.
_SCALA_IMPORT_TEMPLATE = """
scala_import(
name = "{name}",
exports = [
{exports}
],
jars = [
{jars}
],
runtime_deps = [
{runtime_deps}
],
visibility = [
"{visibility}"
]
)
"""

# BUILD-file snippet for a scala_library target (same fields as
# java_library).
_SCALA_LIBRARY_TEMPLATE = """
scala_library(
name = "{name}",
exports = [
{exports}
],
runtime_deps = [
{runtime_deps}
],
visibility = [
"{visibility}"
]
)
"""
def _build_external_workspace_from_opts_impl(ctx):
    """Repository rule implementation: decodes the encoded target configs
    and writes one BUILD file per package into the external workspace."""
    header = ctx.attr.build_header
    sep = ctx.attr.separator

    # Group encoded targets by the BUILD file they belong to.
    # Keys look like "path/to/pkg:target_name".
    grouped = {}
    for cfg_key, cfg in ctx.attr.target_configs.items():
        pkg = cfg_key.split(":")[0]
        if pkg not in grouped:
            grouped[pkg] = []
        grouped[pkg].append(cfg)

    for pkg, entries in grouped.items():
        contents = header + "\n\n"
        for target in entries:
            # Decode "field<sep>TYPE<sep>value..." records into a dict.
            # TYPE "L" marks a list (empty elements dropped), "B" a
            # boolean, anything else a plain string.
            decoded = {}
            for record in target:
                parts = record.split(sep)
                field = parts[0]
                if parts[1] == "L":
                    decoded[field] = [p for p in parts[2:] if len(p) > 0]
                elif parts[1] == "B":
                    decoded[field] = parts[2] == "true" or parts[2] == "True"
                else:
                    decoded[field] = parts[2]

            # Render list fields as quoted, comma/newline-terminated labels.
            exports_str = "".join(["\"" + e + "\",\n" for e in decoded.get("exports", [])])
            jars_str = "".join(["\"" + j + "\",\n" for j in decoded.get("jars", [])])
            runtime_deps_str = "".join(["\"" + d + "\",\n" for d in decoded.get("runtimeDeps", [])])

            target_name = decoded["name"].split(":")[1]
            lang = decoded["lang"]
            if lang == "java":
                contents += _JAVA_LIBRARY_TEMPLATE.format(name = target_name, exports = exports_str, runtime_deps = runtime_deps_str, visibility = decoded["visibility"])
            elif lang.startswith("scala") and decoded["kind"] == "import":
                contents += _SCALA_IMPORT_TEMPLATE.format(name = target_name, exports = exports_str, jars = jars_str, runtime_deps = runtime_deps_str, visibility = decoded["visibility"])
            elif lang.startswith("scala") and decoded["kind"] == "library":
                contents += _SCALA_LIBRARY_TEMPLATE.format(name = target_name, exports = exports_str, runtime_deps = runtime_deps_str, visibility = decoded["visibility"])
            else:
                # Unknown lang/kind combination: surface it in the build log.
                print(decoded)
        ctx.file(ctx.path(pkg + "/BUILD"), contents, False)
    return None
# Repository rule wiring: the encoded target data, field separator and
# BUILD-file header are passed in as mandatory string attributes and
# decoded by the implementation function above.
build_external_workspace_from_opts = repository_rule(
    attrs = {
        "target_configs": attr.string_list_dict(mandatory = True),
        "separator": attr.string(mandatory = True),
        "build_header": attr.string(mandatory = True),
    },
    implementation = _build_external_workspace_from_opts_impl
)
def build_header():
    """Return the header prepended to every generated BUILD file (empty)."""
    # The original spelled this as an empty triple-quoted string.
    return ""
def list_target_data_separator():
    """Return the field separator used in the encoded target records."""
    separator = "|||"
    return separator
def list_target_data():
    """Return the encoded dependency graph, keyed by target label.

    Each value is a list of "field|||TYPE|||value..." records (see
    list_target_data_separator); the repository rule implementation
    decodes them into BUILD-file rules.
    """
    return {
        "3rdparty/jvm/com/google/code/findbugs:jsr305": ["lang||||||java","name||||||//3rdparty/jvm/com/google/code/findbugs:jsr305","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/google/code/findbugs/jsr305","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/com/google/code/gson:gson": ["lang||||||java","name||||||//3rdparty/jvm/com/google/code/gson:gson","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/google/code/gson/gson","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/com/google/dagger:dagger": ["lang||||||java","name||||||//3rdparty/jvm/com/google/dagger:dagger","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/google/dagger/dagger","runtimeDeps|||L|||//3rdparty/jvm/javax/inject:javax_inject","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/com/google/dagger:dagger_compiler": ["lang||||||java","name||||||//3rdparty/jvm/com/google/dagger:dagger_compiler","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/google/dagger/dagger_compiler","runtimeDeps|||L|||//3rdparty/jvm/com/google/guava:guava|||//3rdparty/jvm/com/squareup:javapoet|||//3rdparty/jvm/org/checkerframework:checker_compat_qual|||//3rdparty/jvm/com/google/guava:failureaccess|||//3rdparty/jvm/com/google/code/findbugs:jsr305|||//3rdparty/jvm/com/google/googlejavaformat:google_java_format|||//3rdparty/jvm/net/ltgt/gradle/incap:incap|||//3rdparty/jvm/org/jetbrains/kotlin:kotlin_stdlib|||//3rdparty/jvm/org/jetbrains/kotlinx:kotlinx_metadata_jvm|||//3rdparty/jvm/com/google/dagger:dagger_spi|||//3rdparty/jvm/com/google/dagger:dagger_producers|||//3rdparty/jvm/javax/inject:javax_inject|||//3rdparty/jvm/javax/annotation:jsr250_api|||//3rdparty/jvm/com/google/dagger:dagger","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/com/google/dagger:dagger_producers": ["lang||||||java","name||||||//3rdparty/jvm/com/google/dagger:dagger_producers","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/google/dagger/dagger_producers","runtimeDeps|||L|||//3rdparty/jvm/com/google/guava:guava|||//3rdparty/jvm/org/checkerframework:checker_compat_qual|||//3rdparty/jvm/com/google/guava:failureaccess|||//3rdparty/jvm/javax/inject:javax_inject|||//3rdparty/jvm/com/google/dagger:dagger","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/com/google/dagger:dagger_spi": ["lang||||||java","name||||||//3rdparty/jvm/com/google/dagger:dagger_spi","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/google/dagger/dagger_spi","runtimeDeps|||L|||//3rdparty/jvm/com/google/guava:guava|||//3rdparty/jvm/com/squareup:javapoet|||//3rdparty/jvm/com/google/guava:failureaccess|||//3rdparty/jvm/com/google/code/findbugs:jsr305|||//3rdparty/jvm/com/google/dagger:dagger_producers|||//3rdparty/jvm/javax/inject:javax_inject|||//3rdparty/jvm/com/google/dagger:dagger","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/com/google/errorprone:error_prone_annotations": ["lang||||||java","name||||||//3rdparty/jvm/com/google/errorprone:error_prone_annotations","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/google/errorprone/error_prone_annotations","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/com/google/errorprone:javac_shaded": ["lang||||||java","name||||||//3rdparty/jvm/com/google/errorprone:javac_shaded","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/google/errorprone/javac_shaded","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/com/google/googlejavaformat:google_java_format": ["lang||||||java","name||||||//3rdparty/jvm/com/google/googlejavaformat:google_java_format","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/google/googlejavaformat/google_java_format","runtimeDeps|||L|||//3rdparty/jvm/com/google/guava:guava|||//3rdparty/jvm/com/google/errorprone:javac_shaded","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/com/google/guava:failureaccess": ["lang||||||java","name||||||//3rdparty/jvm/com/google/guava:failureaccess","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/google/guava/failureaccess","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/com/google/guava:guava": ["lang||||||java","name||||||//3rdparty/jvm/com/google/guava:guava","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/google/guava/guava","runtimeDeps|||L|||//3rdparty/jvm/com/google/errorprone:error_prone_annotations|||//3rdparty/jvm/com/google/guava:failureaccess|||//3rdparty/jvm/com/google/code/findbugs:jsr305|||//3rdparty/jvm/com/google/guava:listenablefuture|||//3rdparty/jvm/com/google/j2objc:j2objc_annotations|||//3rdparty/jvm/org/checkerframework:checker_qual","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/com/google/guava:listenablefuture": ["lang||||||java","name||||||//3rdparty/jvm/com/google/guava:listenablefuture","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/google/guava/listenablefuture","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/com/google/j2objc:j2objc_annotations": ["lang||||||java","name||||||//3rdparty/jvm/com/google/j2objc:j2objc_annotations","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/google/j2objc/j2objc_annotations","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/com/google/protobuf:protobuf_java": ["lang||||||java","name||||||//3rdparty/jvm/com/google/protobuf:protobuf_java","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/google/protobuf/protobuf_java","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/com/google/protobuf:protobuf_java_util": ["lang||||||java","name||||||//3rdparty/jvm/com/google/protobuf:protobuf_java_util","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/google/protobuf/protobuf_java_util","runtimeDeps|||L|||//3rdparty/jvm/com/google/protobuf:protobuf_java|||//3rdparty/jvm/com/google/guava:guava|||//3rdparty/jvm/com/google/code/gson:gson","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/com/squareup:javapoet": ["lang||||||java","name||||||//3rdparty/jvm/com/squareup:javapoet","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/com/squareup/javapoet","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/commons_io:commons_io": ["lang||||||java","name||||||//3rdparty/jvm/commons_io:commons_io","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/commons_io/commons_io","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/javax/annotation:jsr250_api": ["lang||||||java","name||||||//3rdparty/jvm/javax/annotation:jsr250_api","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/javax/annotation/jsr250_api","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/javax/inject:javax_inject": ["lang||||||java","name||||||//3rdparty/jvm/javax/inject:javax_inject","visibility||||||//visibility:public","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/javax/inject/javax_inject","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/net/ltgt/gradle/incap:incap": ["lang||||||java","name||||||//3rdparty/jvm/net/ltgt/gradle/incap:incap","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/net/ltgt/gradle/incap/incap","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/org/checkerframework:checker_compat_qual": ["lang||||||java","name||||||//3rdparty/jvm/org/checkerframework:checker_compat_qual","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/checkerframework/checker_compat_qual","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/org/checkerframework:checker_qual": ["lang||||||java","name||||||//3rdparty/jvm/org/checkerframework:checker_qual","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/checkerframework/checker_qual","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/org/jetbrains:annotations": ["lang||||||java","name||||||//3rdparty/jvm/org/jetbrains:annotations","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/jetbrains/annotations","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/org/jetbrains/kotlin:kotlin_stdlib": ["lang||||||java","name||||||//3rdparty/jvm/org/jetbrains/kotlin:kotlin_stdlib","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/jetbrains/kotlin/kotlin_stdlib","runtimeDeps|||L|||//3rdparty/jvm/org/jetbrains/kotlin:kotlin_stdlib_common|||//3rdparty/jvm/org/jetbrains:annotations","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/org/jetbrains/kotlin:kotlin_stdlib_common": ["lang||||||java","name||||||//3rdparty/jvm/org/jetbrains/kotlin:kotlin_stdlib_common","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/jetbrains/kotlin/kotlin_stdlib_common","runtimeDeps|||L|||","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"],
        "3rdparty/jvm/org/jetbrains/kotlinx:kotlinx_metadata_jvm": ["lang||||||java","name||||||//3rdparty/jvm/org/jetbrains/kotlinx:kotlinx_metadata_jvm","visibility||||||//3rdparty/jvm:__subpackages__","kind||||||library","deps|||L|||","jars|||L|||","sources|||L|||","exports|||L|||//external:jar/org/jetbrains/kotlinx/kotlinx_metadata_jvm","runtimeDeps|||L|||//3rdparty/jvm/org/jetbrains/kotlin:kotlin_stdlib","processorClasses|||L|||","generatesApi|||B|||false","licenses|||L|||","generateNeverlink|||B|||false"]
    }
def build_external_workspace(name):
    """Instantiate the external workspace repository with the data baked
    into this file."""
    target_data = list_target_data()
    sep = list_target_data_separator()
    header = build_header()
    return build_external_workspace_from_opts(name = name, target_configs = target_data, separator = sep, build_header = header)
| 107.877419
| 1,102
| 0.666766
| 1,976
| 16,721
| 5.4833
| 0.069332
| 0.109645
| 0.076234
| 0.101523
| 0.877896
| 0.843378
| 0.787079
| 0.72515
| 0.664698
| 0.628888
| 0
| 0.009955
| 0.056815
| 16,721
| 154
| 1,103
| 108.577922
| 0.677066
| 0.00317
| 0
| 0.242424
| 1
| 0.05303
| 0.753735
| 0.562736
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037879
| false
| 0
| 0.030303
| 0.030303
| 0.106061
| 0.007576
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
20d91dca622025c40e9655704f8c22f3046c2078
| 61,881
|
py
|
Python
|
darkhistory/electrons/ics/BE_integrals.py
|
cgiovanetti/DarkHistory
|
ca1ce2e9478be1023a7b5fee19e7c1b2a21862e8
|
[
"MIT"
] | 8
|
2019-04-23T02:09:40.000Z
|
2022-02-15T21:11:25.000Z
|
darkhistory/electrons/ics/BE_integrals.py
|
cgiovanetti/DarkHistory
|
ca1ce2e9478be1023a7b5fee19e7c1b2a21862e8
|
[
"MIT"
] | 1
|
2017-09-29T17:36:45.000Z
|
2017-09-29T17:36:45.000Z
|
darkhistory/electrons/ics/BE_integrals.py
|
cgiovanetti/DarkHistory
|
ca1ce2e9478be1023a7b5fee19e7c1b2a21862e8
|
[
"MIT"
] | 10
|
2019-04-23T16:29:58.000Z
|
2022-02-05T01:25:31.000Z
|
"""Integrals over the Bose-Einstein distribution."""
import numpy as np
import scipy.special as sp
from darkhistory.utilities import log_1_plus_x
from darkhistory.utilities import check_err
from darkhistory.utilities import bernoulli as bern
from darkhistory.utilities import log_series_diff
from darkhistory.utilities import spence_series_diff
from darkhistory.utilities import exp_expn
from darkhistory.utilities import hyp2f1_func_real
from scipy.integrate import quad
def F2(a,b,tol=1e-10):
    """Definite integral of x^2/[(exp(x) - 1)]

    Parameters
    ----------
    a : ndarray
        Lower limit of integration. Can be either 1D or 2D.
    b : ndarray
        Upper limit of integration. Can be either 1D or 2D.
    tol : float
        The relative tolerance to be reached. Default is 1e-10.

    Returns
    -------
    tuple of ndarray
        The resulting integral, and the relative size of the last series
        term added (the error estimate), entry by entry.
    """
    # bound is fixed. If changed to another number, the exact integral from bound to infinity later in the code needs to be changed to the appropriate value.
    bound = 2.

    # Two different series to approximate this: below and above bound.

    def low_summand(x, k):
        # k-th term of the small-x (Bernoulli-number) series of the
        # indefinite integral; k == 1 carries the two leading terms.
        if k == 1:
            return x**2/2 - x**3/6
        else:
            return(
                bern(k)*x**(k+2)/(sp.factorial(k)*(k+2))
            )

    # B_n for n odd, n > 1 is zero.

    def high_summand(x, k):
        # k-th term of the large-x series: exponentially suppressed
        # incomplete gamma functions. Entries at x == inf contribute zero.
        inf = (x == np.inf)
        expr = np.zeros_like(x)
        # gammaincc(n,x) = 1/gamma(n) * int_x^\infty t^{n-1}exp(-t) dt
        expr[~inf] = 2*sp.gammaincc(
            3, k*np.array(x[~inf], dtype='float64')
        )/k**3
        return expr

    if a.ndim == 1 and b.ndim == 2:
        if b.shape[1] != a.size:
            raise TypeError('The second dimension of b must have the same length as a')
        # Extend a to a 2D array.
        a = np.outer(np.ones(b.shape[0], dtype='float128'), a)
    elif a.ndim == 2 and b.ndim == 1:
        if a.shape[1] != b.size:
            raise TypeError('The second dimension of a must have the same length as b')
        b = np.outer(np.ones(a.shape[0], dtype='float128'), b)
    # if both are 1D, the rest of the code still works.

    # NOTE(review): the 'float128' dtype string only exists on platforms
    # whose C long double is extended precision (not Windows/ARM mac);
    # np.longdouble would be the portable spelling — confirm before changing.
    integral = np.zeros(a.shape, dtype='float128')
    err = np.zeros_like(integral)
    next_term = np.zeros_like(integral)

    # NOTE(review): entries with a or b exactly equal to bound match none
    # of these three masks and stay zero — presumably such inputs never
    # occur; verify against callers.
    both_low = (a < bound) & (b < bound)
    low_high = (a < bound) & (b >= bound)
    both_high = (a > bound) & (b > bound)

    # Both low
    if np.any(both_low):
        # Initialize first term of each sum for either limit, and set integral to that value.
        low_sum_a = low_summand(a[both_low], 1)
        low_sum_b = low_summand(b[both_low], 1)
        integral[both_low] = low_sum_b - low_sum_a
        # Index of summand.
        k_low = 2
        # Initialize error.
        err_max = 10*tol
        while err_max > tol:
            # Get next term.
            next_term[both_low] = (
                low_summand(b[both_low], k_low)
                - low_summand(a[both_low], k_low)
            )
            # Estimate the error
            err[both_low] = np.abs(
                np.divide(
                    next_term[both_low],
                    integral[both_low],
                    out = np.zeros_like(next_term[both_low]),
                    where = integral[both_low] != 0
                )
            )
            # Add the next term in the series to the integral.
            integral[both_low] += next_term[both_low]
            # Increment k_low. Increment by 2 since B_n is zero for odd n > 1.
            k_low += 2
            # Set the errors. Only propagate parts where the errors are large to the next step.
            err_max = np.max(err[both_low])
            both_low &= (err > tol)

    # a low b high
    if np.any(low_high):
        # Evaluate the definite integral from a to 2, and then 2 to b.
        low_sum_a = low_summand(a[low_high], 1)
        high_sum_b = high_summand(b[low_high], 1)
        low_sum_bound = low_summand(bound, 1)
        # Exact integral from 2 to infinity.
        int_bound_inf = np.float128(1.417948518338124870521)
        # First term in integral from a to bound
        int_a_bound = low_sum_bound - low_sum_a
        # First term in integral from bound to infinity
        int_bound_b = int_bound_inf - high_sum_b
        # Initialize the integral
        integral[low_high] = int_a_bound + int_bound_b
        # Counters, error estimate
        k_low = 2
        k_high = 2
        err_max = 10*tol
        # Arrays for next term
        next_term_a_bound = np.zeros_like(integral)
        next_term_bound_b = np.zeros_like(integral)
        while err_max > tol:
            next_term_a_bound[low_high] = (
                low_summand(bound, k_low)
                - low_summand(a[low_high], k_low)
            )
            # Only need to compute the next term to correct high_sum_b, since int_bound_inf is exact.
            next_term_bound_b[low_high] = (
                -high_summand(b[low_high], k_high)
            )
            next_term[low_high] = (
                next_term_a_bound[low_high]
                + next_term_bound_b[low_high]
            )
            # Error estimate
            err[low_high] = np.abs(
                np.divide(
                    next_term[low_high],
                    integral[low_high],
                    out = np.zeros_like(next_term[low_high]),
                    where = integral[low_high] != 0
                )
            )
            # Add the next terms to the current integral.
            integral[low_high] += next_term[low_high]
            k_low += 2
            k_high += 1
            err_max = np.max(err[low_high])
            low_high &= (err > tol)

    # Both high
    if np.any(both_high):
        # high_summand(x, 1) starts the series for int_x^inf, so the
        # definite a->b integral is the a-series minus the b-series.
        high_sum_a = high_summand(a[both_high], 1)
        high_sum_b = high_summand(b[both_high], 1)
        integral[both_high] = high_sum_a - high_sum_b
        k_high = 2
        err_max = 10*tol
        while err_max > tol:
            next_term[both_high] = (
                high_summand(a[both_high], k_high)
                - high_summand(b[both_high], k_high)
            )
            err[both_high] = np.abs(
                np.divide(
                    next_term[both_high],
                    integral[both_high],
                    out = np.zeros_like(next_term[both_high]),
                    where = integral[both_high] != 0
                )
            )
            integral[both_high] += next_term[both_high]
            k_high += 1
            err_max = np.max(err[both_high])
            both_high &= (err > tol)

    return integral, err
def F1(a,b,epsrel=0):
    """Definite integral of x/[(exp(x) - 1)].

    This is computed from the indefinite integral

    .. math::
        \\int dx \\frac{x}{e^x - 1} = x \\log\\left(1 - e^{-x} \\right)
        - \\text{Li}_2\\left(e^{-x}\\right) =
        x \\log\\left(1 - e^{-x} \\right) -
        \\text{Sp}\\left( 1 - e^{-x} \\right) + \\frac{\\pi^2}{6} \\,,

    where Sp is Spence's function, as implemented in ``scipy.special.spence``.

    Parameters
    ----------
    a : ndarray
        Lower limit of integration. Can be either 1D or 2D.
    b : ndarray
        Upper limit of integration. Can be either 1D or 2D.
    epsrel : float
        Target relative error associated with series expansion. If zero, then the error is not computed. Default is 0. If the error is larger than ``epsrel``, then the Taylor expansions used here are insufficient. Higher order terms can be added very easily, however.

    Returns
    -------
    ndarray
        The resulting integral.

    Notes
    -----
    For a or b > 0.01, the exact analytic expression is used, whereas below that we use a series expansion. This avoids numerical errors due to computation of log(1 - exp(-x)) and likewise in the `spence` function. Note that `scipy.special.spence` can only take `float64` numbers, so downcasting is necessary for 0.01 < x < 3.

    See Also
    ---------
    :func:`.log_1_plus_x`, :func:`.spence_series_diff`
    """
    lowlim = 0.1
    upplim = 3

    def indef_int(x):
        # Piecewise evaluation of the indefinite integral, deliberately
        # WITHOUT the pi^2/6 constant (restored at the end for mixed-regime
        # limits).
        inf = (x == np.inf)
        low = (x < lowlim)
        high = (x > upplim) & (~inf)
        gen = ~(low | high) & (~inf)
        expr = np.zeros(x.size)
        # Two different series for small and large x limit.
        # Excludes pi^2/6 to avoid catastrophic cancellation.
        if np.any(inf):
            expr[inf] = 0
        if np.any(low):
            # Taylor series about x = 0.
            expr[low] = (
                x[low] - x[low]**2/4 + x[low]**3/36
                - x[low]**5/3600 + x[low]**7/211680 - x[low]**9/10886400
            )
        if np.any(high):
            # NOTE(review): n is assigned but never used below.
            n = np.arange(11) + 1
            # Large-x expansion: -Li_2(e^{-x}) truncated at 11 terms.
            expr[high] = (
                x[high]*log_1_plus_x(-np.exp(-x[high]))
                - np.exp(-x[high]) - np.exp(-2*x[high])/4
                - np.exp(-3*x[high])/9 - np.exp(-4*x[high])/16
                - np.exp(-5*x[high])/25 - np.exp(-6*x[high])/36
                - np.exp(-7*x[high])/49 - np.exp(-8*x[high])/64
                - np.exp(-9*x[high])/81
                - np.exp(-10*x[high])/100
                - np.exp(-11*x[high])/121
            )
        if np.any(gen):
            # Exact expression; spence only accepts float64 input.
            expr[gen] = (x[gen]*log_1_plus_x(-np.exp(-x[gen]))
                - sp.spence(
                    np.array(1. - np.exp(-x[gen]), dtype='float64')
                )
            )
        return expr

    if a.ndim == 1 and b.ndim == 2:
        if b.shape[1] != a.size:
            raise TypeError('The second dimension of b must have the same length as a.')
        # Extend a to a 2D array.
        a = np.outer(np.ones(b.shape[0]), a)
    elif a.ndim == 2 and b.ndim == 1:
        if a.shape[1] != b.size:
            raise TypeError('The second dimension of a must have the same length as b.')
        b = np.outer(np.ones(a.shape[0]), b)
    # if both are 1D, then the rest of the code still works.

    integral = np.zeros(a.shape, dtype='float128')
    both_low = (a < lowlim) & (b < lowlim)
    both_high = (a > upplim) & (b > upplim)

    if np.any(both_low):
        # Difference of the two small-x Taylor series.
        integral[both_low] = (
            b[both_low]-a[both_low]
            - (b[both_low]-a[both_low])*(b[both_low]+a[both_low])/4
            + (b[both_low]**3 - a[both_low]**3)/36
            - (b[both_low]**5 - a[both_low]**5)/3600
            + (b[both_low]**7 - a[both_low]**7)/211680
            - (b[both_low]**9 - a[both_low]**9)/10886400
        )
        if epsrel > 0:
            # First omitted series term bounds the truncation error.
            err = (b[both_low]**11 - a[both_low]**11)/526901760
            check_err(integral[both_low], err, epsrel)

    if np.any(both_high):
        # Use a series for the spence function.
        spence_term = np.zeros_like(integral)
        spence_term[both_high] = spence_series_diff(
            np.exp(-b[both_high]),
            np.exp(-a[both_high])
        )
        b_inf = both_high & (b == np.inf)
        b_not_inf = both_high & (b != np.inf)
        # The x log(1 - e^{-x}) boundary term vanishes when b is infinite.
        integral[b_inf] = (
            - a[b_inf]*log_1_plus_x(-np.exp(-a[b_inf]))
            - spence_term[b_inf]
        )
        integral[b_not_inf] = (
            b[b_not_inf]*log_1_plus_x(-np.exp(-b[b_not_inf]))
            - a[b_not_inf]*log_1_plus_x(-np.exp(-a[b_not_inf]))
            - spence_term[b_not_inf]
        )
        if epsrel > 0:
            err = (
                np.exp(-b[both_high])**11
                - np.exp(-a[both_high])**11
            )/11**2
            check_err(integral[both_high], err, epsrel)

    gen_case = ~(both_low | both_high)
    if np.any(gen_case):
        integral[gen_case] = indef_int(b[gen_case]) - indef_int(a[gen_case])

    # Correct for missing pi^2/6 where necessary.
    a_low_b_notlow = (a < lowlim) & (b >= lowlim)
    integral[a_low_b_notlow] += np.pi**2/6

    return integral
def F0(a,b,epsrel=0):
    """Definite integral of 1/[(exp(x) - 1)].

    Parameters
    ----------
    a : ndarray
        Lower limit of integration. Can be either 1D or 2D.
    b : ndarray
        Upper limit of integration. Can be either 1D or 2D.
    epsrel : float
        Relative error associated with series expansion. If zero, then the error is not computed.

    Returns
    -------
    ndarray
        The resulting integral.
    """
    lowlim = 0.1
    upplim = 3

    def indef_int(x):
        # Indefinite integral log(1 - e^{-x}), evaluated piecewise.
        inf = (x == np.inf)
        low = (x <= 1e-10)
        high = (x > 1e-10) & (~inf)
        expr = np.zeros_like(x)
        if np.any(inf):
            expr[inf] = 0
        if np.any(high):
            expr[high] = log_1_plus_x(-np.exp(-x[high]))
        if np.any(low):
            # Small-x series of log(1 - e^{-x}).
            expr[low] = (
                np.log(x[low]) - x[low]/2 + x[low]**2/24
                - x[low]**4/2880 + x[low]**6/181440
                - x[low]**8/9676800 + x[low]**10/479001600
            )
        return expr

    if a.ndim == 1 and b.ndim == 2:
        if b.shape[1] != a.size:
            raise TypeError('The second dimension of b must have the same length as a.')
        # Extend a to a 2D array.
        a = np.outer(np.ones(b.shape[0]), a)
    elif a.ndim == 2 and b.ndim == 1:
        if a.shape[1] != b.size:
            raise TypeError('The second dimension of a must have the same length as b.')
        b = np.outer(np.ones(a.shape[0]), b)
    # if both are 1D, then the rest of the code still works.

    integral = np.zeros(a.shape, dtype='float128')
    both_low = (a < lowlim) & (b < lowlim)
    both_high = (a > upplim) & (b > upplim)

    if np.any(both_low):
        # Difference of small-x series; log(b/a) is computed directly to
        # avoid cancellation between the two log(x) terms.
        integral[both_low] = (
            np.log(b[both_low]/a[both_low])
            - (b[both_low]-a[both_low])/2
            + (b[both_low]-a[both_low])*(b[both_low]+a[both_low])/24
            - (b[both_low]**4 - a[both_low]**4)/2880
            + (b[both_low]**6 - a[both_low]**6)/181440
            - (b[both_low]**8 - a[both_low]**8)/9676800
            + (b[both_low]**10 - a[both_low]**10)/479001600
        )
        if epsrel > 0:
            # First omitted series term bounds the truncation error.
            err = -(b[both_low]**12 - a[both_low]**12)*691/15692092416000
            check_err(integral[both_low], err, epsrel)

    if np.any(both_high):
        integral[both_high] = log_series_diff(
            np.exp(-b[both_high]),
            np.exp(-a[both_high])
        )
        if epsrel > 0:
            err = -(
                np.exp(-b[both_high])**12 -
                np.exp(-a[both_high])**12
            )/12
            check_err(integral[both_high], err, epsrel)

    gen_case = ~(both_low | both_high)
    if np.any(gen_case):
        integral[gen_case] = indef_int(b[gen_case]) - indef_int(a[gen_case])

    return integral
def F_inv(a,b,tol=1e-10):
    """Definite integral of (1/x)/(exp(x) - 1).

    Parameters
    ----------
    a : ndarray
        Lower limit of integration.
    b : ndarray
        Upper limit of integration.
    tol : float
        The relative tolerance to be reached.

    Returns
    -------
    tuple of ndarray
        The resulting integral, and an estimate of the relative error
        (size of the last series term relative to the running total).
    """
    # bound is fixed. If changed to another number, the exact integral from
    # bound to infinity later in the code needs to be changed to the
    # appropriate value.
    bound = 2.

    # Two different series to approximate this: below and above bound.

    def low_summand(x, k):
        # k-th term of the small-x (Bernoulli-number) series for the
        # antiderivative; k = 1 carries the leading -1/x - log(x)/2 part.
        if k == 1:
            return -1/x - np.log(x)/2
        else:
            # B_n for n odd, n > 1 is zero, so callers only request
            # even k >= 2 here.
            return (
                bern(k)*(x**(k-1))/
                (sp.factorial(k)*(k-1))
            )

    def high_summand(x, k):
        # k-th term of the large-x series: E_1(k*x). Entries at
        # x = +inf contribute exactly zero.
        inf = (x == np.inf)
        expr = np.zeros_like(x)
        # sp.expn does not support float128; cast to float64 for the call.
        expr[~inf] = sp.expn(1, k*np.array(x[~inf], dtype='float64'))
        return expr

    if a.ndim == 1 and b.ndim == 2:
        if b.shape[1] != a.size:
            raise TypeError('The second dimension of b must have the same length as a.')
        # Extend a to a 2D array.
        a = np.outer(np.ones(b.shape[0],dtype='float128'), a)
    elif a.ndim == 2 and b.ndim == 1:
        if a.shape[1] != b.size:
            raise TypeError('The second dimension of a must have the same length as b.')
        b = np.outer(np.ones(a.shape[0],dtype='float128'), b)

    # if both are 1D, then the rest of the code still works.
    integral = np.zeros(a.shape, dtype='float128')
    err = np.zeros_like(integral)
    next_term = np.zeros_like(integral)

    # Masks selecting which series applies to each (a, b) pair.
    # NOTE(review): entries with a == bound exactly fall into no mask and
    # are left at zero -- confirm callers never hit this edge.
    both_low = (a < bound) & (b < bound)
    low_high = (a < bound) & (b >= bound)
    both_high = (a > bound) & (b > bound)

    # Both low
    if np.any(both_low):
        low_sum_a = low_summand(a[both_low], 1)
        low_sum_b = low_summand(b[both_low], 1)
        integral[both_low] = low_sum_b - low_sum_a
        k_low = 2
        err_max = 10*tol
        while err_max > tol:
            next_term[both_low] = (
                low_summand(b[both_low], k_low)
                - low_summand(a[both_low], k_low)
            )
            # Relative size of the new term; entries whose running total
            # is zero are assigned zero error.
            err[both_low] = np.abs(
                np.divide(
                    next_term[both_low],
                    integral[both_low],
                    out = np.zeros_like(next_term[both_low]),
                    where = integral[both_low] != 0
                )
            )
            integral[both_low] += next_term[both_low]
            # Odd Bernoulli numbers (k > 1) vanish, so advance k by 2.
            k_low += 2
            err_max = np.max(err[both_low])
            # Keep refining only the entries that have not converged.
            both_low &= (err > tol)

    # a low b high
    if np.any(low_high):
        # Evaluate the definite integral from a to 2, and then 2 to b.
        low_sum_a = low_summand(a[low_high], 1)
        high_sum_b = high_summand(b[low_high], 1)
        low_sum_bound = low_summand(bound, 1)
        # Exact integral from 2 to infinity.
        int_bound_inf = np.float128(0.053082306482669888568)
        # int(a, b) = int(a, bound) + (int(bound, inf) - int(b, inf)).
        int_a_bound = low_sum_bound - low_sum_a
        int_bound_b = int_bound_inf - high_sum_b
        integral[low_high] = int_a_bound + int_bound_b
        k_low = 2
        k_high = 2
        err_max = 10*tol
        next_term_a_bound = np.zeros_like(integral)
        next_term_bound_b = np.zeros_like(integral)
        while err_max > tol:
            next_term_a_bound[low_high] = (
                low_summand(bound, k_low)
                - low_summand(a[low_high], k_low)
            )
            # Only need to compute the next term for the b to inf integral.
            next_term_bound_b[low_high] = (
                -high_summand(b[low_high], k_high)
            )
            next_term[low_high] = (
                next_term_a_bound[low_high]
                + next_term_bound_b[low_high]
            )
            err[low_high] = np.abs(
                np.divide(
                    next_term[low_high],
                    integral[low_high],
                    out = np.zeros_like(next_term[low_high]),
                    where = integral[low_high] != 0
                )
            )
            integral[low_high] += next_term[low_high]
            # Low series skips odd k (zero Bernoulli numbers); the high
            # series uses every k.
            k_low += 2
            k_high += 1
            err_max = np.max(err[low_high])
            low_high &= (err > tol)

    # Both high
    if np.any(both_high):
        high_sum_a = high_summand(a[both_high], 1)
        high_sum_b = high_summand(b[both_high], 1)
        # int(a, b) = int(a, inf) - int(b, inf).
        integral[both_high] = high_sum_a - high_sum_b
        k_high = 2
        err_max = 10*tol
        while err_max > tol:
            next_term[both_high] = (
                high_summand(a[both_high], k_high)
                - high_summand(b[both_high], k_high)
            )
            err[both_high] = np.abs(
                np.divide(
                    next_term[both_high],
                    integral[both_high],
                    out = np.zeros_like(next_term[both_high]),
                    where = integral[both_high] != 0
                )
            )
            integral[both_high] += next_term[both_high]
            k_high += 1
            err_max = np.max(err[both_high])
            both_high &= (err > tol)

    return integral, err
def F_inv_a(lowlim, a, tol=1e-10):
    """Integral of 1/((x+a)(exp(x) - 1)) from lowlim to infinity.

    Parameters
    ----------
    lowlim : ndarray
        Lower limit of integration.
    a : ndarray
        Parameter in (x+a).
    tol : float
        The relative tolerance to be reached.

    Returns
    -------
    tuple of ndarray
        The resulting integral, and an estimate of the relative error
        (size of the last series term relative to the running total).
    """
    bound = np.ones_like(lowlim, dtype='float128')*2.

    # Two different series to approximate this: below and above bound.

    def low_summand(x, a, k):
        # k-th term of the small-x series; the hypergeometric factor is
        # evaluated via the file's hyp2f1_func_real helper.
        if k == 1:
            expr = np.log(x)/a - np.log(x+a)/a - 0.5*x*(
                1/a - x/(2*a**2)
                *hyp2f1_func_real(1, -x/a)
            )
            return expr
        else:
            # B_n for n odd, n > 1 is zero; callers only request even k.
            return bern(k)*x**k/(sp.factorial(k)*k)*(
                1/a - k*x/((k+1)*a**2)*hyp2f1_func_real(k, -x/a)
            )

    def high_summand(x, a, k):
        # k-th term of the large-x series, evaluated through exp_expn
        # for numerical stability. x = +inf contributes exactly zero.
        inf = (x == np.inf)
        expr = np.zeros_like(x)
        expr[inf] = 0
        expr[~inf] = np.exp(-k*x[~inf])*exp_expn(
            1, k*(x[~inf] + a[~inf])
        )
        return expr

    if a.ndim == 1 and lowlim.ndim == 2:
        if lowlim.shape[1] != a.size:
            raise TypeError('The second dimension of lowlim must have the same length as a.')
        # Extend a to a 2D array.
        a = np.outer(np.ones(lowlim.shape[0]), a)
    elif a.ndim == 2 and lowlim.ndim == 1:
        if a.shape[1] != lowlim.size:
            raise TypeError('The second dimension of a must have the same length as lowlim.')
        lowlim = np.outer(np.ones(a.shape[0]), lowlim)

    # if both are 1D, then the rest of the code still works.
    integral = np.zeros(lowlim.shape, dtype='float128')
    err = np.zeros_like(integral)
    next_term = np.zeros_like(integral)

    a_is_zero = (a == 0)
    low = (lowlim < 2) & ~a_is_zero
    high = ~low & ~a_is_zero

    if np.any(a_is_zero):
        # With a == 0 the integrand reduces to 1/(x(exp(x)-1)); delegate
        # to F_inv. F_inv returns (integral, err), so keep only the
        # integral here -- the original code assigned the whole tuple,
        # which cannot broadcast into the masked slice.
        integral[a_is_zero] = F_inv(
            lowlim[a_is_zero],
            np.ones_like(lowlim[a_is_zero])*np.inf,
            tol = tol
        )[0]

    if np.any(low):
        # Integral = int(lowlim, bound) via the low series
        #          + int(bound, inf) via the high series.
        integral[low] = (
            low_summand(bound[low], a[low], 1)
            - low_summand(lowlim[low], a[low], 1)
            + high_summand(bound[low], a[low], 1)
        )
        k_low = 2
        k_high = 2
        err_max = 10*tol
        while err_max > tol:
            next_term[low] = (
                low_summand(bound[low], a[low], k_low)
                - low_summand(lowlim[low], a[low], k_low)
                + high_summand(bound[low], a[low], k_high)
            )
            # Relative size of the new term; zero totals get zero error.
            err[low] = np.abs(
                np.divide(
                    next_term[low],
                    integral[low],
                    out = np.zeros_like(next_term[low]),
                    where = integral[low] != 0
                )
            )
            integral[low] += next_term[low]
            # Low series skips odd k (zero Bernoulli numbers).
            k_low += 2
            k_high += 1
            err_max = np.max(err[low])
            # Keep refining only unconverged entries.
            low &= (err > tol)

    if np.any(high):
        integral[high] = high_summand(lowlim[high], a[high], 1)
        k_high = 2
        err_max = 10*tol
        while err_max > tol:
            next_term[high] = high_summand(lowlim[high], a[high], k_high)
            err[high] = np.abs(
                np.divide(
                    next_term[high],
                    integral[high],
                    out = np.zeros_like(next_term[high]),
                    where = integral[high] != 0
                )
            )
            integral[high] += next_term[high]
            k_high += 1
            err_max = np.max(err[high])
            high &= (err > tol)

    return integral, err
def F_inv_n(a,b,n,tol=1e-10):
    """Definite integral of (1/x**n)/(exp(x) - 1)

    Parameters
    ----------
    a : ndarray
        Lower limit of integration.
    b : ndarray
        Upper limit of integration.
    n : int
        Power of 1/x in the integrand.
    tol : float
        The relative tolerance to be reached.

    Returns
    -------
    tuple of ndarray
        The resulting integral, and an estimate of the relative error
        (size of the last series term relative to the running total).
    """
    bound = np.float128(2.)

    # Two different series to approximate this: below and above bound.

    def low_summand(x, k):
        # k-th term of the small-x Bernoulli series for the
        # antiderivative of x**(-n)/(e^x - 1).
        if k == 1:
            # Leading power-law terms (j = 0..n-1) plus the log term
            # contributed by j = n.
            init_sum = 0
            for j in np.arange(n):
                init_sum += bern(j)/sp.factorial(j)*x**(j-n)/(j-n)
            init_sum += bern(n)/sp.factorial(n)*np.log(x)
            return init_sum
        else:
            # B_n for n odd, n > 1 is zero: pick whichever of the two
            # adjacent Bernoulli indices is even.
            if np.mod(k+n-1, 2) == 0:
                return(
                    bern(k+n-1)/sp.factorial(k+n-1)*x**(k-1)/(k-1)
                )
            else:
                return(
                    bern(k+n)/sp.factorial(k+n)*x**k/k
                )

    def high_summand(x, k):
        # k-th term of the large-x series: E_n(k*x)/x**(n-1). Entries
        # at x = +inf contribute exactly zero.
        inf = (x == np.inf)
        expr = np.zeros_like(x)
        # sp.expn does not support float128; cast to float64 for the call.
        expr[~inf] = (
            sp.expn(n, k*np.array(x[~inf], dtype='float64'))/x[~inf]**(n-1)
        )
        return expr

    if a.ndim == 1 and b.ndim == 2:
        if b.shape[1] != a.size:
            raise TypeError('The second dimension of b must have the same length as a.')
        # Extend a to a 2D array.
        a = np.outer(np.ones(b.shape[0],dtype='float128'), a)
    elif a.ndim == 2 and b.ndim == 1:
        if a.shape[1] != b.size:
            raise TypeError('The second dimension of a must have the same length as b.')
        b = np.outer(np.ones(a.shape[0],dtype='float128'), b)

    # if both are 1D, then the rest of the code still works.
    integral = np.zeros_like(a, dtype='float128')
    err = np.zeros_like(integral)
    next_term = np.zeros_like(integral)

    # Masks selecting which series applies to each (a, b) pair.
    both_low = (a < bound) & (b < bound)
    low_high = (a < bound) & (b >= bound)
    both_high = (a > bound) & (b > bound)

    # Both low
    if np.any(both_low):
        low_sum_a = low_summand(a[both_low], 1)
        low_sum_b = low_summand(b[both_low], 1)
        integral[both_low] = low_sum_b - low_sum_a
        k_low = 2
        err_max = 10*tol
        while err_max > tol:
            next_term[both_low] = (
                low_summand(b[both_low], k_low)
                - low_summand(a[both_low], k_low)
            )
            # Relative size of the new term; zero totals get zero error.
            err[both_low] = np.abs(
                np.divide(
                    next_term[both_low],
                    integral[both_low],
                    out = np.zeros_like(next_term[both_low]),
                    where = integral[both_low] != 0
                )
            )
            integral[both_low] += next_term[both_low]
            k_low += 2
            err_max = np.max(err[both_low])
            # Keep refining only unconverged entries.
            both_low &= (err > tol)

    # a low b high
    if np.any(low_high):
        # Evaluate the definite integral from a to 2, and then 2 to b.
        low_sum_a = low_summand(a[low_high], 1)
        high_sum_b = high_summand(b[low_high], 1)
        low_sum_bound = low_summand(bound, 1)
        # Exact integral from 2 to infinity, computed numerically since
        # it depends on n.
        int_bound_inf = quad(
            lambda x: 1/(x**n*(np.exp(x) - 1)),
            bound, np.inf, epsabs = 1e-16, epsrel=1e-16
        )[0]
        int_a_bound = low_sum_bound - low_sum_a
        int_bound_b = int_bound_inf - high_sum_b
        integral[low_high] = int_a_bound + int_bound_b
        k_low = 2
        k_high = 2
        err_max = 10*tol
        next_term_a_bound = np.zeros_like(integral)
        next_term_bound_b = np.zeros_like(integral)
        while err_max > tol:
            next_term_a_bound[low_high] = (
                low_summand(bound, k_low)
                - low_summand(a[low_high], k_low)
            )
            # Only need to compute the next term for the b to inf integral.
            next_term_bound_b[low_high] = (
                -high_summand(b[low_high], k_high)
            )
            next_term[low_high] = (
                next_term_a_bound[low_high]
                + next_term_bound_b[low_high]
            )
            err[low_high] = np.abs(
                np.divide(
                    next_term[low_high],
                    integral[low_high],
                    out = np.zeros_like(next_term[low_high]),
                    where = integral[low_high] != 0
                )
            )
            integral[low_high] += next_term[low_high]
            k_low += 2
            k_high += 1
            err_max = np.max(err[low_high])
            low_high &= (err > tol)

    # Both high
    if np.any(both_high):
        high_sum_a = high_summand(a[both_high], 1)
        high_sum_b = high_summand(b[both_high], 1)
        integral[both_high] = high_sum_a - high_sum_b
        k_high = 2
        err_max = 10*tol
        while err_max > tol:
            next_term[both_high] = (
                high_summand(a[both_high], k_high)
                - high_summand(b[both_high], k_high)
            )
            err[both_high] = np.abs(
                np.divide(
                    next_term[both_high],
                    integral[both_high],
                    out = np.zeros_like(next_term[both_high]),
                    where = integral[both_high] != 0
                )
            )
            integral[both_high] += next_term[both_high]
            k_high += 1
            err_max = np.max(err[both_high])
            both_high &= (err > tol)

    return integral, err
def F_inv_3(a,b,tol=1e-10):
    """Definite integral of (1/x**3)/(exp(x) - 1).

    Parameters
    ----------
    a : ndarray
        Lower limit of integration.
    b : ndarray
        Upper limit of integration.
    tol : float
        The relative tolerance to be reached.

    Returns
    -------
    tuple of ndarray
        The resulting integral, and an estimate of the relative error
        (size of the last series term relative to the running total).
    """
    # bound is fixed. If changed to another number, the exact integral from
    # bound to infinity later in the code needs to be changed to the
    # appropriate value.
    bound = 2.

    # Two different series to approximate this: below and above bound.

    def low_summand(x, k):
        # k-th term of the small-x Bernoulli series for the
        # antiderivative of x**(-3)/(e^x - 1).
        if k == 1:
            return -1/(3*x**3) + 1/(4*x**2) - 1/(12*x)
        else:
            # B_n for n odd, n > 1 is zero; callers only request even k.
            return (
                bern(k+2)*(x**(k-1))/(sp.factorial(k+2)*(k-1))
            )

    def high_summand(x, k):
        # k-th term of the large-x series: E_3(k*x)/x**2. Entries at
        # x = +inf contribute exactly zero.
        inf = (x == np.inf)
        expr = np.zeros_like(x)
        # sp.expn does not support float128; cast to float64 for the call.
        expr[~inf] = (
            sp.expn(3, k*np.array(x[~inf], dtype='float64'))/x[~inf]**2
        )
        return expr

    if a.ndim == 1 and b.ndim == 2:
        if b.shape[1] != a.size:
            raise TypeError('The second dimension of b must have the same length as a.')
        # Extend a to a 2D array.
        a = np.outer(np.ones(b.shape[0],dtype='float128'), a)
    elif a.ndim == 2 and b.ndim == 1:
        if a.shape[1] != b.size:
            raise TypeError('The second dimension of a must have the same length as b.')
        b = np.outer(np.ones(a.shape[0],dtype='float128'), b)

    # if both are 1D, then the rest of the code still works.
    integral = np.zeros_like(a, dtype='float128')
    err = np.zeros_like(integral)
    next_term = np.zeros_like(integral)

    # Masks selecting which series applies to each (a, b) pair.
    both_low = (a < bound) & (b < bound)
    low_high = (a < bound) & (b >= bound)
    both_high = (a > bound) & (b > bound)

    # Both low
    if np.any(both_low):
        low_sum_a = low_summand(a[both_low], 1)
        low_sum_b = low_summand(b[both_low], 1)
        integral[both_low] = low_sum_b - low_sum_a
        k_low = 2
        err_max = 10*tol
        while err_max > tol:
            next_term[both_low] = (
                low_summand(b[both_low], k_low)
                - low_summand(a[both_low], k_low)
            )
            # Relative size of the new term; zero totals get zero error.
            err[both_low] = np.abs(
                np.divide(
                    next_term[both_low],
                    integral[both_low],
                    out = np.zeros_like(next_term[both_low]),
                    where = integral[both_low] != 0
                )
            )
            integral[both_low] += next_term[both_low]
            k_low += 2
            err_max = np.max(err[both_low])
            # Keep refining only unconverged entries.
            both_low &= (err > tol)

    # a low b high
    if np.any(low_high):
        # Evaluate the definite integral from a to 2, and then 2 to b.
        low_sum_a = low_summand(a[low_high], 1)
        high_sum_b = high_summand(b[low_high], 1)
        low_sum_bound = low_summand(bound, 1)
        # Exact integral from 2 to infinity.
        int_bound_inf = np.float128(0.0083036361900336)
        int_a_bound = low_sum_bound - low_sum_a
        int_bound_b = int_bound_inf - high_sum_b
        integral[low_high] = int_a_bound + int_bound_b
        k_low = 2
        k_high = 2
        err_max = 10*tol
        next_term_a_bound = np.zeros_like(integral)
        next_term_bound_b = np.zeros_like(integral)
        while err_max > tol:
            next_term_a_bound[low_high] = (
                low_summand(bound, k_low)
                - low_summand(a[low_high], k_low)
            )
            # Only need to compute the next term for the b to inf integral.
            next_term_bound_b[low_high] = (
                -high_summand(b[low_high], k_high)
            )
            next_term[low_high] = (
                next_term_a_bound[low_high]
                + next_term_bound_b[low_high]
            )
            err[low_high] = np.abs(
                np.divide(
                    next_term[low_high],
                    integral[low_high],
                    out = np.zeros_like(next_term[low_high]),
                    where = integral[low_high] != 0
                )
            )
            integral[low_high] += next_term[low_high]
            k_low += 2
            k_high += 1
            err_max = np.max(err[low_high])
            low_high &= (err > tol)

    # Both high
    if np.any(both_high):
        high_sum_a = high_summand(a[both_high], 1)
        high_sum_b = high_summand(b[both_high], 1)
        integral[both_high] = high_sum_a - high_sum_b
        k_high = 2
        err_max = 10*tol
        while err_max > tol:
            next_term[both_high] = (
                high_summand(a[both_high], k_high)
                - high_summand(b[both_high], k_high)
            )
            err[both_high] = np.abs(
                np.divide(
                    next_term[both_high],
                    integral[both_high],
                    out = np.zeros_like(next_term[both_high]),
                    where = integral[both_high] != 0
                )
            )
            integral[both_high] += next_term[both_high]
            k_high += 1
            err_max = np.max(err[both_high])
            both_high &= (err > tol)

    return integral, err
def F_inv_5(a,b,tol=1e-10):
    """Definite integral of (1/x**5)/(exp(x) - 1).

    Parameters
    ----------
    a : ndarray
        Lower limit of integration.
    b : ndarray
        Upper limit of integration.
    tol : float
        The relative tolerance to be reached.

    Returns
    -------
    tuple of ndarray
        The resulting integral, and an estimate of the relative error
        (size of the last series term relative to the running total).
    """
    # bound is fixed. If changed to another number, the exact integral from
    # bound to infinity later in the code needs to be changed to the
    # appropriate value.
    bound = 2.

    # Two different series to approximate this: below and above bound.

    def low_summand(x, k):
        # k-th term of the small-x Bernoulli series for the
        # antiderivative of x**(-5)/(e^x - 1).
        if k == 1:
            return -1/(5*x**5) + 1/(8*x**4) - 1/(36*x**3) + 1/(720*x)
        else:
            # B_n for n odd, n > 1 is zero; callers only request even k.
            return (
                bern(k+4)*(x**(k-1))/(sp.factorial(k+4)*(k-1))
            )

    def high_summand(x, k):
        # k-th term of the large-x series: E_5(k*x)/x**4. Entries at
        # x = +inf contribute exactly zero.
        inf = (x == np.inf)
        expr = np.zeros_like(x)
        # sp.expn does not support float128; cast to float64 for the call.
        expr[~inf] = (
            sp.expn(5, k*np.array(x[~inf], dtype='float64'))/x[~inf]**4
        )
        return expr

    if a.ndim == 1 and b.ndim == 2:
        if b.shape[1] != a.size:
            raise TypeError('The second dimension of b must have the same length as a.')
        # Extend a to a 2D array.
        a = np.outer(np.ones(b.shape[0],dtype='float128'), a)
    elif a.ndim == 2 and b.ndim == 1:
        if a.shape[1] != b.size:
            raise TypeError('The second dimension of a must have the same length as b.')
        b = np.outer(np.ones(a.shape[0],dtype='float128'), b)

    # if both are 1D, then the rest of the code still works.
    integral = np.zeros_like(a, dtype='float128')
    err = np.zeros_like(integral)
    next_term = np.zeros_like(integral)

    # Masks selecting which series applies to each (a, b) pair.
    both_low = (a < bound) & (b < bound)
    low_high = (a < bound) & (b >= bound)
    both_high = (a > bound) & (b > bound)

    # Both low
    if np.any(both_low):
        low_sum_a = low_summand(a[both_low], 1)
        low_sum_b = low_summand(b[both_low], 1)
        integral[both_low] = low_sum_b - low_sum_a
        k_low = 2
        err_max = 10*tol
        while err_max > tol:
            next_term[both_low] = (
                low_summand(b[both_low], k_low)
                - low_summand(a[both_low], k_low)
            )
            # Relative size of the new term; zero totals get zero error.
            err[both_low] = np.abs(
                np.divide(
                    next_term[both_low],
                    integral[both_low],
                    out = np.zeros_like(next_term[both_low]),
                    where = integral[both_low] != 0
                )
            )
            integral[both_low] += next_term[both_low]
            k_low += 2
            err_max = np.max(err[both_low])
            # Keep refining only unconverged entries.
            both_low &= (err > tol)

    # a low b high
    if np.any(low_high):
        # Evaluate the definite integral from a to 2, and then 2 to b.
        low_sum_a = low_summand(a[low_high], 1)
        high_sum_b = high_summand(b[low_high], 1)
        low_sum_bound = low_summand(bound, 1)
        # Exact integral from 2 to infinity.
        int_bound_inf = np.float128(0.001483878955697788)
        int_a_bound = low_sum_bound - low_sum_a
        int_bound_b = int_bound_inf - high_sum_b
        integral[low_high] = int_a_bound + int_bound_b
        k_low = 2
        k_high = 2
        err_max = 10*tol
        next_term_a_bound = np.zeros_like(integral)
        next_term_bound_b = np.zeros_like(integral)
        while err_max > tol:
            next_term_a_bound[low_high] = (
                low_summand(bound, k_low)
                - low_summand(a[low_high], k_low)
            )
            # Only need to compute the next term for the b to inf integral.
            next_term_bound_b[low_high] = (
                -high_summand(b[low_high], k_high)
            )
            next_term[low_high] = (
                next_term_a_bound[low_high]
                + next_term_bound_b[low_high]
            )
            err[low_high] = np.abs(
                np.divide(
                    next_term[low_high],
                    integral[low_high],
                    out = np.zeros_like(next_term[low_high]),
                    where = integral[low_high] != 0
                )
            )
            integral[low_high] += next_term[low_high]
            k_low += 2
            k_high += 1
            err_max = np.max(err[low_high])
            low_high &= (err > tol)

    # Both high
    if np.any(both_high):
        high_sum_a = high_summand(a[both_high], 1)
        high_sum_b = high_summand(b[both_high], 1)
        integral[both_high] = high_sum_a - high_sum_b
        k_high = 2
        err_max = 10*tol
        while err_max > tol:
            next_term[both_high] = (
                high_summand(a[both_high], k_high)
                - high_summand(b[both_high], k_high)
            )
            err[both_high] = np.abs(
                np.divide(
                    next_term[both_high],
                    integral[both_high],
                    out = np.zeros_like(next_term[both_high]),
                    where = integral[both_high] != 0
                )
            )
            integral[both_high] += next_term[both_high]
            k_high += 1
            err_max = np.max(err[both_high])
            both_high &= (err > tol)

    return integral, err
def F_log(a,b,tol=1e-10):
    """Definite integral of log(x)/(exp(x) - 1).

    Parameters
    ----------
    a : ndarray
        Lower limit of integration.
    b : ndarray
        Upper limit of integration.
    tol : float
        The relative tolerance to be reached.

    Returns
    -------
    tuple of ndarray
        The resulting integral, and an estimate of the relative error
        (size of the last series term relative to the running total).
    """
    # bound is fixed. If changed to another number, the exact integral from
    # bound to infinity later in the code needs to be changed to the
    # appropriate value.
    bound = 2.

    # Two different series to approximate this: below and above bound.

    def low_summand(x, k):
        # k-th term of the small-x Bernoulli series for the
        # antiderivative of log(x)/(e^x - 1).
        if k == 1:
            return 1/2*(np.log(x)**2) - (x/2)*(np.log(x) - 1)
        else:
            # B_n for n odd, n > 1 is zero; callers only request even k.
            return (
                bern(k)*(x**k)/
                (sp.factorial(k)*k**2)*(k*np.log(x) - 1)
            )

    def high_summand(x, k):
        # k-th term of the large-x series:
        # (exp(-k x) log(x) + E_1(k x))/k. Entries at x = +inf
        # contribute exactly zero.
        # sp.expn does not support float128.
        inf = (x == np.inf)
        expr = np.zeros_like(x)
        expr[inf] = 0
        expr[~inf] = (
            1/k*(np.exp(-k*x[~inf])*np.log(x[~inf])
                + sp.expn(
                    1, k*np.array(x[~inf], dtype='float64')
                )
            )
        )
        return expr

    if a.ndim == 1 and b.ndim == 2:
        if b.shape[1] != a.size:
            raise TypeError('The second dimension of b must have the same length as a.')
        # Extend a to a 2D array.
        a = np.outer(np.ones(b.shape[0]), a)
    elif a.ndim == 2 and b.ndim == 1:
        if a.shape[1] != b.size:
            raise TypeError('The second dimension of a must have the same length as b.')
        b = np.outer(np.ones(a.shape[0]), b)

    # if both are 1D, then the rest of the code still works.
    integral = np.zeros(a.shape, dtype='float128')
    err = np.zeros_like(integral)
    next_term = np.zeros_like(integral)

    # Masks selecting which series applies to each (a, b) pair.
    both_low = (a < bound) & (b < bound)
    low_high = (a < bound) & (b >= bound)
    both_high = (a > bound) & (b > bound)

    # Both low
    if np.any(both_low):
        low_sum_a = low_summand(a[both_low], 1)
        low_sum_b = low_summand(b[both_low], 1)
        integral[both_low] = low_sum_b - low_sum_a
        k_low = 2
        err_max = 10*tol
        while err_max > tol:
            next_term[both_low] = (
                low_summand(b[both_low], k_low)
                - low_summand(a[both_low], k_low)
            )
            # Relative size of the new term; zero totals get zero error.
            err[both_low] = np.abs(
                np.divide(
                    next_term[both_low],
                    integral[both_low],
                    out = np.zeros_like(next_term[both_low]),
                    where = integral[both_low] != 0
                )
            )
            integral[both_low] += next_term[both_low]
            k_low += 2
            err_max = np.max(err[both_low])
            # Keep refining only unconverged entries.
            both_low &= (err > tol)

    # a low b high
    if np.any(low_high):
        # Evaluate the definite integral from a to 2, and then 2 to b.
        low_sum_a = low_summand(a[low_high], 1)
        high_sum_b = high_summand(b[low_high], 1)
        low_sum_bound = low_summand(bound, 1)
        # Exact integral from 2 to infinity.
        int_bound_inf = np.float128(0.15171347859984083704)
        int_a_bound = low_sum_bound - low_sum_a
        int_bound_b = int_bound_inf - high_sum_b
        integral[low_high] = int_a_bound + int_bound_b
        k_low = 2
        k_high = 2
        err_max = 10*tol
        next_term_a_bound = np.zeros_like(integral)
        next_term_bound_b = np.zeros_like(integral)
        while err_max > tol:
            next_term_a_bound[low_high] = (
                low_summand(bound, k_low)
                - low_summand(a[low_high], k_low)
            )
            # Only need to compute the next term for the b to inf integral.
            next_term_bound_b[low_high] = (
                -high_summand(b[low_high], k_high)
            )
            next_term[low_high] = (
                next_term_a_bound[low_high]
                + next_term_bound_b[low_high]
            )
            err[low_high] = np.abs(
                np.divide(
                    next_term[low_high],
                    integral[low_high],
                    out = np.zeros_like(next_term[low_high]),
                    where = integral[low_high] != 0
                )
            )
            integral[low_high] += next_term[low_high]
            k_low += 2
            k_high += 1
            err_max = np.max(err[low_high])
            low_high &= (err > tol)

    # Both high
    if np.any(both_high):
        high_sum_a = high_summand(a[both_high], 1)
        high_sum_b = high_summand(b[both_high], 1)
        integral[both_high] = high_sum_a - high_sum_b
        k_high = 2
        err_max = 10*tol
        while err_max > tol:
            next_term[both_high] = (
                high_summand(a[both_high], k_high)
                - high_summand(b[both_high], k_high)
            )
            err[both_high] = np.abs(
                np.divide(
                    next_term[both_high],
                    integral[both_high],
                    out = np.zeros_like(next_term[both_high]),
                    where = integral[both_high] != 0
                )
            )
            integral[both_high] += next_term[both_high]
            k_high += 1
            err_max = np.max(err[both_high])
            both_high &= (err > tol)

    return integral, err
def F_x_log(a,b,tol=1e-10):
    """Definite integral of x log(x)/(exp(x) - 1).

    Parameters
    ----------
    a : ndarray
        Lower limit of integration.
    b : ndarray
        Upper limit of integration.
    tol : float
        The relative tolerance to be reached.

    Returns
    -------
    tuple of ndarray
        The resulting integral, and an estimate of the relative error
        (size of the last series term relative to the running total).
    """
    # bound is fixed. If changed to another number, the exact integral from
    # bound to infinity later in the code needs to be changed to the
    # appropriate value.
    bound = 2.

    # Two different series to approximate this: below and above bound.

    def low_summand(x,k):
        # k-th term of the small-x Bernoulli series for the
        # antiderivative of x log(x)/(e^x - 1).
        if k==1:
            return x*np.log(x) - x - (x**2/2)*(2*np.log(x) - 1)/4
        else:
            # B_n for n odd, n > 1 is zero; callers only request even k.
            return (
                bern(k)*(x**(k+1))/
                (sp.factorial(k)*(k+1)**2)*((k+1)*np.log(x) - 1)
            )

    def high_summand(x, k):
        # k-th term of the large-x series, combining exp(-kx) log(x),
        # E_1(k x) and E_2(k x) pieces. Entries at x = +inf contribute
        # exactly zero. sp.expn does not support float128.
        inf = (x == np.inf)
        expr = np.zeros_like(x)
        expr[inf] = 0
        expr[~inf] = (
            1/k**2*(
                (1+k*x[~inf])*np.exp(-k*x[~inf])*np.log(x[~inf])
                + (1+k*x[~inf])*sp.expn(
                    1, k*np.array(x[~inf], dtype='float64')
                )
                + sp.expn(2, k*np.array(x[~inf], dtype='float64'))
            )
        )
        return expr

    if a.ndim == 1 and b.ndim == 2:
        if b.shape[1] != a.size:
            raise TypeError('The second dimension of b must have the same length as a.')
        # Extend a to a 2D array.
        a = np.outer(np.ones(b.shape[0]), a)
    elif a.ndim == 2 and b.ndim == 1:
        if a.shape[1] != b.size:
            raise TypeError('The second dimension of a must have the same length as b.')
        b = np.outer(np.ones(a.shape[0]), b)

    # if both are 1D, then the rest of the code still works.
    integral = np.zeros(a.shape, dtype='float128')
    err = np.zeros_like(integral)
    next_term = np.zeros_like(integral)

    # Masks selecting which series applies to each (a, b) pair.
    both_low = (a < bound) & (b < bound)
    low_high = (a < bound) & (b >= bound)
    both_high = (a > bound) & (b > bound)

    # Both low
    if np.any(both_low):
        low_sum_a = low_summand(a[both_low], 1)
        low_sum_b = low_summand(b[both_low], 1)
        integral[both_low] = low_sum_b - low_sum_a
        k_low = 2
        err_max = 10*tol
        while err_max > tol:
            next_term[both_low] = (
                low_summand(b[both_low], k_low)
                - low_summand(a[both_low], k_low)
            )
            # Relative size of the new term; zero totals get zero error.
            err[both_low] = np.abs(
                np.divide(
                    next_term[both_low],
                    integral[both_low],
                    out = np.zeros_like(next_term[both_low]),
                    where = integral[both_low] != 0
                )
            )
            integral[both_low] += next_term[both_low]
            k_low += 2
            err_max = np.max(err[both_low])
            # Keep refining only unconverged entries.
            both_low &= (err > tol)

    # a low b high
    if np.any(low_high):
        # Evaluate the definite integral from a to 2, and then 2 to b.
        low_sum_a = low_summand(a[low_high], 1)
        high_sum_b = high_summand(b[low_high], 1)
        low_sum_bound = low_summand(bound, 1)
        # Exact integral from 2 to infinity.
        int_bound_inf = np.float128(0.4888742871822041)
        int_a_bound = low_sum_bound - low_sum_a
        int_bound_b = int_bound_inf - high_sum_b
        integral[low_high] = int_a_bound + int_bound_b
        k_low = 2
        k_high = 2
        err_max = 10*tol
        next_term_a_bound = np.zeros_like(integral)
        next_term_bound_b = np.zeros_like(integral)
        while err_max > tol:
            next_term_a_bound[low_high] = (
                low_summand(bound, k_low)
                - low_summand(a[low_high], k_low)
            )
            # Only need to compute the next term for the b to inf integral.
            next_term_bound_b[low_high] = (
                -high_summand(b[low_high], k_high)
            )
            next_term[low_high] = (
                next_term_a_bound[low_high]
                + next_term_bound_b[low_high]
            )
            err[low_high] = np.abs(
                np.divide(
                    next_term[low_high],
                    integral[low_high],
                    out = np.zeros_like(next_term[low_high]),
                    where = integral[low_high] != 0
                )
            )
            integral[low_high] += next_term[low_high]
            k_low += 2
            k_high += 1
            err_max = np.max(err[low_high])
            low_high &= (err > tol)

    # Both high
    if np.any(both_high):
        high_sum_a = high_summand(a[both_high], 1)
        high_sum_b = high_summand(b[both_high], 1)
        integral[both_high] = high_sum_a - high_sum_b
        k_high = 2
        err_max = 10*tol
        while err_max > tol:
            next_term[both_high] = (
                high_summand(a[both_high], k_high)
                - high_summand(b[both_high], k_high)
            )
            err[both_high] = np.abs(
                np.divide(
                    next_term[both_high],
                    integral[both_high],
                    out = np.zeros_like(next_term[both_high]),
                    where = integral[both_high] != 0
                )
            )
            integral[both_high] += next_term[both_high]
            k_high += 1
            err_max = np.max(err[both_high])
            both_high &= (err > tol)

    return integral, err
def F_log_a(lowlim, a, tol=1e-10):
    """Integral of log(x+a)/(exp(x) - 1) from lowlim to infinity.

    Parameters
    ----------
    lowlim : ndarray
        Lower limit of integration.
    a : ndarray
        Parameter in log(x+a).
    tol : float
        The relative tolerance to be reached.

    Returns
    -------
    tuple of ndarray
        The resulting integral, and an estimate of the relative error
        (size of the last series term relative to the running total).
    """
    bound = np.ones_like(lowlim,dtype='float128')*2.

    # Two different series to approximate this: below and above bound.

    def low_summand(x, a, k):
        # k-th term of the small-x series. sp.spence does not support
        # float128, so its arguments are cast to float64.
        x_flt64 = np.array(x, dtype='float64')
        a_flt64 = np.array(a, dtype='float64')
        if k == 1:
            # The dilogarithm form differs for positive and negative a.
            expr = np.zeros_like(x)
            a_pos = a > 0
            a_neg = a < 0
            if np.any(a_pos):
                expr[a_pos] = (
                    np.log(x[a_pos])*np.log(a[a_pos])
                    - sp.spence(1+x_flt64[a_pos]/a_flt64[a_pos])
                    - (
                        (x[a_pos]+a[a_pos])
                        *np.log(x[a_pos]+a[a_pos])
                        - x[a_pos]
                    )/2
                )
            if np.any(a_neg):
                expr[a_neg] = (
                    np.log(-x[a_neg]/a[a_neg])*np.log(x[a_neg]+a[a_neg])
                    + sp.spence(-x_flt64[a_neg]/a_flt64[a_neg])
                    - (
                        (x[a_neg]+a[a_neg])*np.log(x[a_neg]+a[a_neg])
                        - x[a_neg]
                    )/2
                )
            return expr
        else:
            # B_n for n odd, n > 1 is zero; callers only request even k.
            return (
                bern(k)*x**k/(sp.factorial(k)*k)*(
                    np.log(x + a)
                    - x/(a*(k+1))*hyp2f1_func_real(k, -x/a)
                )
            )

    def high_summand(x, a, k):
        # k-th term of the large-x series; the E_1 piece is evaluated
        # via exp_expn for stability. x = +inf contributes exactly zero.
        inf = (x == np.inf)
        expr = np.zeros_like(x)
        expr[inf] = 0
        expr[~inf] = (
            np.exp(-k*x[~inf])/k*(
                np.log(x[~inf] + a[~inf])
                + exp_expn(1, k*(x[~inf] + a[~inf]))
            )
        )
        return expr

    if a.ndim == 1 and lowlim.ndim == 2:
        if lowlim.shape[1] != a.size:
            raise TypeError('The second dimension of lowlim must have the same length as a.')
        # Extend a to a 2D array.
        a = np.outer(np.ones(lowlim.shape[0]), a)
    elif a.ndim == 2 and lowlim.ndim == 1:
        if a.shape[1] != lowlim.size:
            raise TypeError('The second dimension of a must have the same length as lowlim.')
        lowlim = np.outer(np.ones(a.shape[0]), lowlim)

    # if both are 1D, then the rest of the code still works.
    integral = np.zeros(lowlim.shape, dtype='float128')
    err = np.zeros_like(integral)
    next_term = np.zeros_like(integral)

    a_is_zero = (a == 0)
    low = (lowlim < 2) & ~a_is_zero
    high = ~low & ~a_is_zero

    if np.any(a_is_zero):
        # With a == 0, log(x+a) reduces to log(x); delegate to F_log.
        # F_log returns (integral, err), so keep only the integral here
        # -- the original code assigned the whole tuple, which cannot
        # broadcast into the masked slice.
        integral[a_is_zero] = F_log(lowlim[a_is_zero],
            np.ones_like(lowlim[a_is_zero])*np.inf,
            tol=tol
        )[0]

    if np.any(low):
        # Integral = int(lowlim, bound) via the low series
        #          + int(bound, inf) via the high series.
        integral[low] = (
            low_summand(bound[low], a[low], 1)
            - low_summand(lowlim[low], a[low], 1)
            + high_summand(bound[low], a[low], 1)
        )
        k_low = 2
        k_high = 2
        err_max = 10*tol
        while err_max > tol:
            next_term[low] = (
                low_summand(bound[low], a[low], k_low)
                - low_summand(lowlim[low], a[low], k_low)
                + high_summand(bound[low], a[low], k_high)
            )
            # Relative size of the new term; zero totals get zero error.
            err[low] = np.abs(
                np.divide(
                    next_term[low],
                    integral[low],
                    out = np.zeros_like(next_term[low]),
                    where = integral[low] != 0
                )
            )
            integral[low] += next_term[low]
            # Low series skips odd k (zero Bernoulli numbers).
            k_low += 2
            k_high += 1
            err_max = np.max(err[low])
            # Keep refining only unconverged entries.
            low &= (err > tol)

    if np.any(high):
        integral[high] = high_summand(lowlim[high], a[high], 1)
        k_high = 2
        err_max = 10*tol
        while err_max > tol:
            next_term[high] = high_summand(lowlim[high], a[high], k_high)
            err[high] = np.abs(
                np.divide(
                    next_term[high],
                    integral[high],
                    out = np.zeros_like(next_term[high]),
                    where = integral[high] != 0
                )
            )
            integral[high] += next_term[high]
            k_high += 1
            err_max = np.max(err[high])
            high &= (err > tol)

    return integral, err
def F_x_log_a(lowlim, a, tol=1e-10):
    """Integral of x log(x+a)/(exp(x) - 1) from lowlim to infinity.

    Parameters
    ----------
    a : ndarray
        Parameter in x log(x+a).
    lowlim : ndarray
        Lower limit of integration.
    tol : float
        The relative tolerance to be reached.

    Returns
    -------
    tuple of ndarray
        The resulting integral and the final relative error estimate for
        each entry.
    """
    # Series switch point: entries with lowlim below `bound` use the small-x
    # expansion up to bound plus the large-x expansion beyond it; entries
    # above use the large-x expansion alone.
    bound = np.ones_like(lowlim, dtype='float128')*2
    # Two different series to approximate this: below and above bound.
    def low_summand(x, a, k):
        # k-th term of the small-x expansion. k = 1 is the leading part;
        # subsequent terms use Bernoulli numbers bern(k) and the real part
        # of a hypergeometric function via hyp2f1_func_real — both helpers
        # defined elsewhere in this module.
        if k == 1:
            return (
                x*(
                    np.log(a+x) - 1
                    + hyp2f1_func_real(0, -x/a)
                )
                -x**2/8*(
                    2*np.log(a+x) - 1
                    + hyp2f1_func_real(1, -x/a)
                )
            )
        else:
            return (
                bern(k)*x**(k+1)/(sp.factorial(k)*(k+1)**2)*(
                    (k+1)*np.log(x+a) - 1
                    + hyp2f1_func_real(k, -x/a)
                )
            )
    def high_summand(x, a, k):
        # k-th term of the large-x expansion, suppressed by exp(-k*x).
        # exp_expn(n, y) is presumably exp(y)*E_n(y) (scaled exponential
        # integral) — defined elsewhere in this module; TODO confirm.
        inf = (x == np.inf)
        expr = np.zeros_like(x)
        # Entries at x = +inf contribute nothing to the tail.
        expr[inf] = 0
        expr[~inf] = (
            np.exp(-k*x[~inf])/k**2*(
                (1+k*x[~inf])*np.log(x[~inf] + a[~inf])
                + (1+k*x[~inf])*exp_expn(1, k*(x[~inf] + a[~inf]))
                + exp_expn(2, k*(x[~inf] + a[~inf]))
            )
        )
        return expr
    # Broadcast a 1D argument against a 2D one so both share a shape.
    if a.ndim == 1 and lowlim.ndim == 2:
        if lowlim.shape[1] != a.size:
            raise TypeError('The second dimension of lowlim must have the same length as a.')
        # Extend a to a 2D array.
        a = np.outer(np.ones(lowlim.shape[0]), a)
    elif a.ndim == 2 and lowlim.ndim == 1:
        if a.shape[1] != lowlim.size:
            raise TypeError('The second dimension of a must have the same length as lowlim.')
        lowlim = np.outer(np.ones(a.shape[0]), lowlim)
    # if both are 1D, then the rest of the code still works.
    integral = np.zeros(lowlim.shape, dtype='float128')
    err = np.zeros_like(integral)
    next_term = np.zeros_like(integral)
    # Partition the entries: a == 0 reduces to F_x_log; otherwise choose the
    # low (lowlim < bound = 2) or high series per entry.
    a_is_zero = (a == 0)
    low = (lowlim < 2) & ~a_is_zero
    high = ~low & ~a_is_zero
    if np.any(a_is_zero):
        integral[a_is_zero] = F_x_log(
            lowlim[a_is_zero],
            np.ones_like(lowlim[a_is_zero])*np.inf,
            tol = tol
        )
    # float64 copies feed the special-function helpers, which do not support
    # float128; the accumulation itself stays in float128.
    bound_flt64 = np.array(bound, dtype='float64')
    a_flt64 = np.array(a, dtype='float64')
    lowlim_flt64 = np.array(lowlim, dtype='float64')
    if np.any(low):
        # Leading terms: low series from lowlim to bound, plus high series
        # from bound to infinity.
        integral[low] = (
            low_summand(bound_flt64[low], a_flt64[low], 1)
            - low_summand(lowlim_flt64[low], a_flt64[low], 1)
            + high_summand(bound_flt64[low], a_flt64[low], 1)
        )
        k_low = 2
        k_high = 2
        err_max = 10*tol
        # Accumulate terms until every active entry converges; the `low`
        # mask shrinks as individual entries reach the tolerance.
        while err_max > tol:
            next_term[low] = (
                low_summand(
                    bound_flt64[low], a_flt64[low], k_low
                )
                - low_summand(
                    lowlim_flt64[low], a_flt64[low], k_low
                )
                + high_summand(
                    bound_flt64[low], a_flt64[low], k_high
                )
            )
            # Relative size of the newest term; entries whose running
            # integral is zero are assigned zero error to avoid 0-division.
            err[low] = np.abs(
                np.divide(
                    next_term[low],
                    integral[low],
                    out = np.zeros_like(next_term[low]),
                    where = integral[low] != 0
                )
            )
            integral[low] += next_term[low]
            # The low series advances by even k only; the high series by 1.
            k_low += 2
            k_high += 1
            err_max = np.max(err[low])
            # Drop converged entries from the active mask.
            low &= (err > tol)
    if np.any(high):
        integral[high] = high_summand(
            lowlim_flt64[high], a_flt64[high], 1
        )
        k_high = 2
        err_max = 10*tol
        while err_max > tol:
            next_term[high] = high_summand(
                lowlim_flt64[high], a_flt64[high], k_high
            )
            err[high] = np.abs(
                np.divide(
                    next_term[high],
                    integral[high],
                    out = np.zeros_like(next_term[high]),
                    where = integral[high] != 0
                )
            )
            integral[high] += next_term[high]
            k_high += 1
            err_max = np.max(err[high])
            # Keep iterating only the entries still above tolerance.
            high &= (err > tol)
    return integral, err
| 29.202926
| 327
| 0.496679
| 8,543
| 61,881
| 3.402786
| 0.033712
| 0.040936
| 0.029137
| 0.022876
| 0.897626
| 0.883385
| 0.873891
| 0.866701
| 0.854076
| 0.831029
| 0
| 0.033329
| 0.381797
| 61,881
| 2,119
| 328
| 29.202926
| 0.726572
| 0.164348
| 0
| 0.738697
| 0
| 0
| 0.034187
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026054
| false
| 0
| 0.007663
| 0
| 0.0659
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
20dd81c847fc3dec3d6add3e88eb43b809b5055c
| 906
|
py
|
Python
|
ipeadatapy/describe.py
|
gustavobrangel/ipeadatapy
|
4a9460fe4869bba44ca9d6c5a3346e16d2776443
|
[
"MIT"
] | 41
|
2019-05-08T01:10:06.000Z
|
2022-02-23T02:08:37.000Z
|
ipeadatapy/describe.py
|
gustavobrangel/ipeadatapy
|
4a9460fe4869bba44ca9d6c5a3346e16d2776443
|
[
"MIT"
] | 4
|
2020-04-26T23:18:48.000Z
|
2021-04-29T10:03:45.000Z
|
ipeadatapy/describe.py
|
gustavobrangel/ipeadatapy
|
4a9460fe4869bba44ca9d6c5a3346e16d2776443
|
[
"MIT"
] | 8
|
2020-06-15T08:16:25.000Z
|
2021-11-25T01:29:43.000Z
|
import pandas as pd
from .metadata import metadata
from .metadata_old import metadata_old
def describe(series):
    """Describes the specified time series. series must be the time series' code.

    Parameters
    ----------
    series : str
        The Ipeadata code of the time series.

    Returns
    -------
    pandas.DataFrame
        One column (named after the series) with rows Name, Code, Big Theme,
        Theme, Source, Source acronym, Comment, Last update, Frequency,
        Measure, Unit and Status.
    """
    # Fetch each metadata frame once instead of re-querying the API for
    # every single field (the original issued ~14 identical calls).
    meta = metadata(series)
    meta_old = metadata_old(series)

    def _first(df, column):
        # First value of a metadata column.
        return list(df[column])[0]

    name = _first(meta, 'NAME')
    values = [
        name,
        _first(meta, 'CODE'),
        _first(meta_old, 'BIG THEME'),
        _first(meta_old, 'THEME'),
        _first(meta, 'SOURCE'),
        _first(meta, 'SOURCE ACRONYM'),
        _first(meta, 'COMMENT'),
        _first(meta, 'LAST UPDATE'),
        _first(meta, 'FREQUENCY'),
        _first(meta, 'MEASURE'),
        _first(meta, 'UNIT'),
        _first(meta, 'SERIES STATUS'),
    ]
    labels = ['Name', 'Code', 'Big Theme', 'Theme', 'Source', 'Source acronym',
              'Comment', 'Last update', 'Frequency', 'Measure', 'Unit', 'Status']
    desc_df = pd.DataFrame({name: values}, index=labels)
    return desc_df
| 100.666667
| 681
| 0.684327
| 123
| 906
| 4.99187
| 0.292683
| 0.254072
| 0.254072
| 0.309446
| 0.257329
| 0.104235
| 0.104235
| 0.104235
| 0
| 0
| 0
| 0.015796
| 0.091611
| 906
| 8
| 682
| 113.25
| 0.730255
| 0.081678
| 0
| 0
| 0
| 0
| 0.223716
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.5
| 0
| 0.833333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
456d23db7b71d6166d4cfe0d0d6f852fe2b394c9
| 138
|
py
|
Python
|
tests/test_units/test_auth/test_openid_connect.py
|
iburinoc/aiogoogle
|
6f702c0d19599fce1bd36b7a32922e76aaa74008
|
[
"MIT"
] | null | null | null |
tests/test_units/test_auth/test_openid_connect.py
|
iburinoc/aiogoogle
|
6f702c0d19599fce1bd36b7a32922e76aaa74008
|
[
"MIT"
] | null | null | null |
tests/test_units/test_auth/test_openid_connect.py
|
iburinoc/aiogoogle
|
6f702c0d19599fce1bd36b7a32922e76aaa74008
|
[
"MIT"
] | null | null | null |
# TODO:
def test_authorization_url():
    """Placeholder: authorization-URL construction is not yet covered."""
    pass
def test_decode_and_validate():
    """Placeholder: ID-token decode/validation is not yet covered."""
    pass
def test_build_user_creds_jwt_grant():
    """Placeholder: building user credentials via JWT grant is not yet covered."""
    pass
| 13.8
| 38
| 0.73913
| 20
| 138
| 4.6
| 0.7
| 0.228261
| 0.23913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181159
| 138
| 10
| 39
| 13.8
| 0.814159
| 0.036232
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
4571c43222b631884d3cec187156449fc2fa3c7c
| 278
|
py
|
Python
|
src/huggingface/fortex/huggingface/__init__.py
|
Piyush13y/forte-wrappers
|
250df428a8705f769d53eb070f89c3f66e713015
|
[
"Apache-2.0"
] | 3
|
2021-06-17T18:52:00.000Z
|
2022-01-11T19:15:21.000Z
|
src/huggingface/fortex/huggingface/__init__.py
|
Piyush13y/forte-wrappers
|
250df428a8705f769d53eb070f89c3f66e713015
|
[
"Apache-2.0"
] | 66
|
2021-03-30T15:04:11.000Z
|
2022-03-24T04:35:11.000Z
|
src/huggingface/fortex/huggingface/__init__.py
|
Piyush13y/forte-wrappers
|
250df428a8705f769d53eb070f89c3f66e713015
|
[
"Apache-2.0"
] | 10
|
2021-03-16T19:48:31.000Z
|
2022-03-01T05:48:17.000Z
|
from fortex.huggingface.bio_ner_predictor import *
from fortex.huggingface.transformers_processor import *
from fortex.huggingface.question_and_answering_single import *
from fortex.huggingface.zero_shot_classifier import *
from fortex.huggingface.token_classification import *
| 46.333333
| 62
| 0.874101
| 34
| 278
| 6.882353
| 0.529412
| 0.213675
| 0.448718
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071942
| 278
| 5
| 63
| 55.6
| 0.906977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
45d0e603b50a2fe165e0e63a16fffdf65a79155e
| 4,406
|
py
|
Python
|
text2vec/text_matching_dataset.py
|
shibing624/simtext
|
b9e805db5f6ba90115fbb8b824bf80c141f0ddde
|
[
"Apache-2.0"
] | 18
|
2019-11-12T03:28:23.000Z
|
2019-12-05T07:52:24.000Z
|
text2vec/text_matching_dataset.py
|
wuxiuzhi738/text2vec
|
b9e805db5f6ba90115fbb8b824bf80c141f0ddde
|
[
"Apache-2.0"
] | 3
|
2019-11-12T08:05:37.000Z
|
2019-12-04T13:49:25.000Z
|
text2vec/text_matching_dataset.py
|
wuxiuzhi738/text2vec
|
b9e805db5f6ba90115fbb8b824bf80c141f0ddde
|
[
"Apache-2.0"
] | 5
|
2019-11-17T13:58:33.000Z
|
2019-12-05T06:24:35.000Z
|
# -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description:
"""
import os
from torch.utils.data import Dataset
from loguru import logger
from transformers import PreTrainedTokenizer
from datasets import load_dataset
def load_train_data(path):
    """Load training triples from a tab-separated file.

    Each valid line holds sentence1, sentence2 and a score separated by
    tabs; malformed lines are logged and skipped.

    Parameters
    ----------
    path : str
        Path to the TSV file; an empty list is returned when it is missing.

    Returns
    -------
    list of (str, str, int)
        For STS-style files (path contains 'STS', case-insensitive) the
        similarity score is binarized at 2.5; otherwise it is truncated
        to int.
    """
    data = []
    if not os.path.isfile(path):
        return data
    with open(path, 'r', encoding='utf8') as f:
        for line in f:
            line = line.strip().split('\t')
            if len(line) != 3:
                logger.warning(f'line size not match, pass: {line}')
                continue
            # STS similarity scores are floats in [0, 5] (cf. the HF dataset
            # classes below comparing label > 2.5); int() on a string like
            # "3.5" raises ValueError, so parse as float first.
            score = float(line[2])
            if 'STS' in path.upper():
                # Binarize the similarity score at the 2.5 midpoint.
                score = int(score > 2.5)
            else:
                score = int(score)
            data.append((line[0], line[1], score))
    return data
def load_test_data(path):
    """Read evaluation triples (sentence1, sentence2, int label) from a
    tab-separated file.

    Lines that do not contain exactly three fields are logged and skipped.
    Returns an empty list when *path* is not an existing file.
    """
    if not os.path.isfile(path):
        return []
    examples = []
    with open(path, 'r', encoding='utf8') as f:
        for raw in f:
            fields = raw.strip().split('\t')
            if len(fields) == 3:
                sent1, sent2, label = fields
                examples.append((sent1, sent2, int(label)))
            else:
                logger.warning(f'line size not match, pass: {fields}')
    return examples
class TextMatchingTrainDataset(Dataset):
    """Training dataset for text matching; overrides __getitem__ and __len__.

    Each element of *data* is a (sentence1, sentence2, label) triple; the
    two sentences are tokenized lazily on access.
    """
    def __init__(self, tokenizer: PreTrainedTokenizer, data: list, max_len: int = 64):
        self.max_len = max_len
        self.tokenizer = tokenizer
        self.data = data
    def __len__(self):
        return len(self.data)
    def text_2_id(self, text: str):
        # Fixed-length encoding: pad/truncate to max_len, return pt tensors.
        return self.tokenizer(text, max_length=self.max_len, truncation=True,
                              padding='max_length', return_tensors='pt')
    def __getitem__(self, index: int):
        sent1, sent2, label = self.data[index]
        return self.text_2_id(sent1), self.text_2_id(sent2), label
class TextMatchingTestDataset(Dataset):
    """Evaluation dataset for text matching; overrides __getitem__ and __len__.

    Mirrors TextMatchingTrainDataset: *data* holds (sentence1, sentence2,
    label) triples whose sentences are tokenized on access.
    """
    def __init__(self, tokenizer: PreTrainedTokenizer, data: list, max_len: int = 64):
        self.max_len = max_len
        self.tokenizer = tokenizer
        self.data = data
    def __len__(self):
        return len(self.data)
    def text_2_id(self, text: str):
        # Fixed-length encoding: pad/truncate to max_len, return pt tensors.
        return self.tokenizer(text, max_length=self.max_len, truncation=True,
                              padding='max_length', return_tensors='pt')
    def __getitem__(self, index: int):
        sent1, sent2, label = self.data[index]
        return self.text_2_id(sent1), self.text_2_id(sent2), label
class HFTextMatchingTrainDataset(Dataset):
    """Load HuggingFace datasets to SBERT format (training split).

    Args:
        tokenizer (PreTrainedTokenizer): tokenizer
        name (str): dataset name
        max_len (int): max length of sentence
    """
    def __init__(self, tokenizer: PreTrainedTokenizer, name="STS-B", max_len: int = 64):
        self.tokenizer = tokenizer
        self.data = load_dataset("shibing624/nli_zh", name.upper(), split="train")
        self.max_len = max_len
        self.name = name.upper()
    def __len__(self):
        return len(self.data)
    def text_2_id(self, text: str):
        # Fixed-length encoding: pad/truncate to max_len, return pt tensors.
        return self.tokenizer(text, max_length=self.max_len, truncation=True,
                              padding='max_length', return_tensors='pt')
    def __getitem__(self, index: int):
        row = self.data[index]
        # STS-B similarity scores are binarized at 2.5 into a 0/1 label;
        # other datasets' labels are passed through unchanged.
        if 'STS' in self.name:
            label = int(row['label'] > 2.5)
        else:
            label = row['label']
        return self.text_2_id(row['sentence1']), self.text_2_id(row['sentence2']), label
class HFTextMatchingTestDataset(Dataset):
    """Load HuggingFace datasets to SBERT format (evaluation split).

    Args:
        tokenizer (PreTrainedTokenizer): tokenizer
        name (str): dataset name
        max_len (int): max length of sentence
    """
    def __init__(self, tokenizer: PreTrainedTokenizer, name="STS-B", max_len: int = 64, split="validation"):
        self.tokenizer = tokenizer
        self.data = load_dataset("shibing624/nli_zh", name.upper(), split=split)
        self.max_len = max_len
    def __len__(self):
        return len(self.data)
    def text_2_id(self, text: str):
        # Fixed-length encoding: pad/truncate to max_len, return pt tensors.
        return self.tokenizer(text, max_length=self.max_len, truncation=True,
                              padding='max_length', return_tensors='pt')
    def __getitem__(self, index: int):
        row = self.data[index]
        # Labels are returned untouched (the raw hub value).
        return self.text_2_id(row['sentence1']), self.text_2_id(row['sentence2']), row['label']
| 31.927536
| 108
| 0.613709
| 570
| 4,406
| 4.521053
| 0.182456
| 0.041909
| 0.032596
| 0.034148
| 0.793946
| 0.787738
| 0.772216
| 0.772216
| 0.772216
| 0.772216
| 0
| 0.017231
| 0.26237
| 4,406
| 137
| 109
| 32.160584
| 0.775692
| 0.113709
| 0
| 0.72093
| 0
| 0
| 0.063641
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.209302
| false
| 0.023256
| 0.05814
| 0.093023
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.